diff --git a/.github/workflows/docker-reusable.yml b/.github/workflows/docker-reusable.yml index f0e46d95..7d937f78 100644 --- a/.github/workflows/docker-reusable.yml +++ b/.github/workflows/docker-reusable.yml @@ -94,11 +94,11 @@ jobs: - target: os: linux arch: amd64 - builder: ubuntu-22.04 + builder: ubuntu-24.04 - target: os: linux arch: arm64 - builder: ubuntu-22.04-arm + builder: ubuntu-24.04-arm name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }} runs-on: ${{ matrix.builder }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 50b14d05..4232ff0f 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -2,17 +2,17 @@ name: OpenAPI on: push: - branches: - - 'master' + tags: + - "v*.*.*" paths: - - 'openapi.yaml' - - '.github/workflows/docs.yml' + - "openapi.yaml" + - ".github/workflows/docs.yml" pull_request: branches: - - '**' + - "**" paths: - - 'openapi.yaml' - - '.github/workflows/docs.yml' + - "openapi.yaml" + - ".github/workflows/docs.yml" # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: @@ -40,7 +40,7 @@ jobs: deploy: name: Deploy runs-on: ubuntu-latest - if: github.ref == 'refs/heads/master' + if: startsWith(github.ref, 'refs/tags/') steps: - name: Checkout uses: actions/checkout@v4 diff --git a/build.nims b/build.nims index aa090e71..baf21e03 100644 --- a/build.nims +++ b/build.nims @@ -4,7 +4,6 @@ import std/os except commandLineParams ### Helper functions proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = - if not dirExists "build": mkDir "build" @@ -14,13 +13,15 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = for param in commandLineParams(): extra_params &= " " & param else: - for i in 2.. 
0'nb: cache = CacheStore.new(cacheSize = config.cacheSize) @@ -286,6 +301,7 @@ proc new*( engine = engine, discovery = discovery, prover = prover, + taskPool = taskpool, ) restServer = RestServerRef diff --git a/codex/conf.nim b/codex/conf.nim index 6d47f8f4..2a859efb 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -44,6 +44,7 @@ import ./utils import ./nat import ./utils/natutils +from ./contracts/config import DefaultRequestCacheSize from ./validationconfig import MaxSlots, ValidationGroups export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig @@ -51,7 +52,11 @@ export ValidationGroups, MaxSlots export DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, - DefaultNumberOfBlocksToMaintainPerInterval + DefaultNumberOfBlocksToMaintainPerInterval, DefaultRequestCacheSize + +type ThreadCount* = distinct Natural + +proc `==`*(a, b: ThreadCount): bool {.borrow.} proc defaultDataDir*(): string = let dataDir = @@ -71,6 +76,7 @@ const DefaultDataDir* = defaultDataDir() DefaultCircuitDir* = defaultDataDir() / "circuits" + DefaultThreadCount* = ThreadCount(0) type StartUpCmd* {.pure.} = enum @@ -184,6 +190,13 @@ type name: "max-peers" .}: int + numThreads* {. + desc: + "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)", + defaultValue: DefaultThreadCount, + name: "num-threads" + .}: ThreadCount + agentString* {. defaultValue: "Codex", desc: "Node agent string which is used as identifier in network", @@ -347,6 +360,16 @@ type name: "reward-recipient" .}: Option[EthAddress] + marketplaceRequestCacheSize* {. + desc: + "Maximum number of StorageRequests kept in memory." & + "Reduces fetching of StorageRequest data from the contract.", + defaultValue: DefaultRequestCacheSize, + defaultValueDesc: $DefaultRequestCacheSize, + name: "request-cache-size", + hidden + .}: uint16 + case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd of PersistenceCmd.prover: circuitDir* {. 
@@ -482,6 +505,13 @@ proc parseCmdArg*( quit QuitFailure ma +proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = + let count = parseInt(input) + if count != 0 and count < 2: + warn "Invalid number of threads", input = input + quit QuitFailure + ThreadCount(count) + proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = var res: SignedPeerRecord try: @@ -579,6 +609,15 @@ proc readValue*( quit QuitFailure val = NBytes(value) +proc readValue*( + r: var TomlReader, val: var ThreadCount +) {.upraises: [SerializationError, IOError].} = + var str = r.readValue(string) + try: + val = parseCmdArg(ThreadCount, str) + except CatchableError as err: + raise newException(SerializationError, err.msg) + proc readValue*( r: var TomlReader, val: var Duration ) {.upraises: [SerializationError, IOError].} = @@ -609,6 +648,9 @@ proc completeCmdArg*(T: type NBytes, val: string): seq[string] = proc completeCmdArg*(T: type Duration, val: string): seq[string] = discard +proc completeCmdArg*(T: type ThreadCount, val: string): seq[string] = + discard + # silly chronicles, colors is a compile-time property proc stripAnsi*(v: string): string = var diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim index 87cd1f2a..3c31c8b5 100644 --- a/codex/contracts/config.nim +++ b/codex/contracts/config.nim @@ -4,47 +4,66 @@ import pkg/questionable/results export contractabi +const DefaultRequestCacheSize* = 128.uint16 + type MarketplaceConfig* = object collateral*: CollateralConfig proofs*: ProofConfig + reservations*: SlotReservationsConfig + requestDurationLimit*: uint64 CollateralConfig* = object repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value - slashCriterion*: uint16 # amount of proofs missed that lead to slashing slashPercentage*: uint8 # percentage of the collateral that is slashed + 
validatorRewardPercentage*: uint8 + # percentage of the slashed amount going to the validators ProofConfig* = object - period*: UInt256 # proofs requirements are calculated per period (in seconds) - timeout*: UInt256 # mark proofs as missing before the timeout (in seconds) + period*: uint64 # proofs requirements are calculated per period (in seconds) + timeout*: uint64 # mark proofs as missing before the timeout (in seconds) downtime*: uint8 # ignore this much recent blocks for proof requirements + downtimeProduct*: uint8 zkeyHash*: string # hash of the zkey file which is linked to the verifier # Ensures the pointer does not remain in downtime for many consecutive # periods. For each period increase, move the pointer `pointerProduct` # blocks. Should be a prime number to ensure there are no cycles. - downtimeProduct*: uint8 + + SlotReservationsConfig* = object + maxReservations*: uint8 func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = ProofConfig( period: tupl[0], timeout: tupl[1], downtime: tupl[2], - zkeyHash: tupl[3], - downtimeProduct: tupl[4], + downtimeProduct: tupl[3], + zkeyHash: tupl[4], ) +func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig = + SlotReservationsConfig(maxReservations: tupl[0]) + func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = CollateralConfig( repairRewardPercentage: tupl[0], maxNumberOfSlashes: tupl[1], - slashCriterion: tupl[2], - slashPercentage: tupl[3], + slashPercentage: tupl[2], + validatorRewardPercentage: tupl[3], ) func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig = - MarketplaceConfig(collateral: tupl[0], proofs: tupl[1]) + MarketplaceConfig( + collateral: tupl[0], + proofs: tupl[1], + reservations: tupl[2], + requestDurationLimit: tupl[3], + ) + +func solidityType*(_: type SlotReservationsConfig): string = + solidityType(SlotReservationsConfig.fieldTypes) func solidityType*(_: type ProofConfig): string = 
solidityType(ProofConfig.fieldTypes) @@ -53,7 +72,10 @@ func solidityType*(_: type CollateralConfig): string = solidityType(CollateralConfig.fieldTypes) func solidityType*(_: type MarketplaceConfig): string = - solidityType(CollateralConfig.fieldTypes) + solidityType(MarketplaceConfig.fieldTypes) + +func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) = + encoder.write(slot.fieldValues) func encode*(encoder: var AbiEncoder, slot: ProofConfig) = encoder.write(slot.fieldValues) @@ -68,6 +90,10 @@ func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T = let tupl = ?decoder.read(ProofConfig.fieldTypes) success ProofConfig.fromTuple(tupl) +func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T = + let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes) + success SlotReservationsConfig.fromTuple(tupl) + func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T = let tupl = ?decoder.read(CollateralConfig.fieldTypes) success CollateralConfig.fromTuple(tupl) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 35557050..3c016a59 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -2,6 +2,7 @@ import std/strutils import pkg/ethers import pkg/upraises import pkg/questionable +import pkg/lrucache import ../utils/exceptions import ../logutils import ../market @@ -20,6 +21,7 @@ type signer: Signer rewardRecipient: ?Address configuration: ?MarketplaceConfig + requestCache: LruCache[string, StorageRequest] MarketSubscription = market.Subscription EventSubscription = ethers.Subscription @@ -27,12 +29,22 @@ type eventSubscription: EventSubscription func new*( - _: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none + _: type OnChainMarket, + contract: Marketplace, + rewardRecipient = Address.none, + requestCacheSize: uint16 = DefaultRequestCacheSize, ): OnChainMarket = without signer =? 
contract.signer: raiseAssert("Marketplace contract should have a signer") - OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient) + var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize)) + + OnChainMarket( + contract: contract, + signer: signer, + rewardRecipient: rewardRecipient, + requestCache: requestCache, + ) proc raiseMarketError(message: string) {.raises: [MarketError].} = raise newException(MarketError, message) @@ -72,16 +84,21 @@ method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = let period = config.proofs.period return Periodicity(seconds: period) -method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} = +method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} = convertEthersError: let config = await market.config() return config.proofs.timeout method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = convertEthersError: - let config = await market.contract.configuration() + let config = await market.config() return config.collateral.repairRewardPercentage +method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = + convertEthersError: + let config = await market.config() + return config.requestDurationLimit + method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = convertEthersError: let config = await market.config() @@ -112,9 +129,16 @@ method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} method getRequest*( market: OnChainMarket, id: RequestId ): Future[?StorageRequest] {.async.} = + let key = $id + + if market.requestCache.contains(key): + return some market.requestCache[key] + convertEthersError: try: - return some await market.contract.getRequest(id) + let request = await market.contract.getRequest(id) + market.requestCache[key] = request + return some request except Marketplace_UnknownRequest: return none StorageRequest @@ -146,7 +170,7 @@ method 
requestExpiresAt*( return await market.contract.requestExpiry(id) method getHost( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.async.} = convertEthersError: let slotId = slotId(requestId, slotIndex) @@ -172,7 +196,7 @@ method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.as method fillSlot( market: OnChainMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, ) {.async.} = @@ -256,7 +280,7 @@ method canProofBeMarkedAsMissing*( return false method reserveSlot*( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = convertEthersError: discard await market.contract @@ -269,7 +293,7 @@ method reserveSlot*( .confirm(1) method canReserveSlot*( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = convertEthersError: return await market.contract.canReserveSlot(requestId, slotIndex) @@ -305,10 +329,10 @@ method subscribeSlotFilled*( method subscribeSlotFilled*( market: OnChainMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, callback: OnSlotFilled, ): Future[MarketSubscription] {.async.} = - proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) = + proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) = if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 87fd1e47..761caada 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -42,6 +42,7 @@ type Marketplace_InsufficientCollateral* = object of SolidityError Marketplace_InsufficientReward* = object of SolidityError 
Marketplace_InvalidCid* = object of SolidityError + Marketplace_DurationExceedsLimit* = object of SolidityError Proofs_InsufficientBlockHeight* = object of SolidityError Proofs_InvalidProof* = object of SolidityError Proofs_ProofAlreadySubmitted* = object of SolidityError @@ -59,10 +60,6 @@ proc currentCollateral*( marketplace: Marketplace, id: SlotId ): UInt256 {.contract, view.} -proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.} -proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.} -proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.} - proc requestStorage*( marketplace: Marketplace, request: StorageRequest ): Confirmable {. @@ -75,10 +72,7 @@ proc requestStorage*( .} proc fillSlot*( - marketplace: Marketplace, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof ): Confirmable {. contract, errors: [ @@ -154,9 +148,6 @@ proc requestExpiry*( marketplace: Marketplace, requestId: RequestId ): SecondsSince1970 {.contract, view.} -proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.} - -proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} @@ -175,7 +166,7 @@ proc submitProof*( .} proc markProofAsMissing*( - marketplace: Marketplace, id: SlotId, period: UInt256 + marketplace: Marketplace, id: SlotId, period: uint64 ): Confirmable {. 
contract, errors: [ @@ -186,9 +177,9 @@ proc markProofAsMissing*( .} proc reserveSlot*( - marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64 ): Confirmable {.contract.} proc canReserveSlot*( - marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64 ): bool {.contract, view.} diff --git a/codex/contracts/provider.nim b/codex/contracts/provider.nim index b7fc5602..b1576bb0 100644 --- a/codex/contracts/provider.nim +++ b/codex/contracts/provider.nim @@ -14,7 +14,7 @@ proc raiseProviderError(message: string) {.raises: [ProviderError].} = proc blockNumberAndTimestamp*( provider: Provider, blockTag: BlockTag -): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} = +): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} = without latestBlock =? await provider.getBlock(blockTag): raiseProviderError("Could not get latest block") @@ -25,7 +25,7 @@ proc blockNumberAndTimestamp*( proc binarySearchFindClosestBlock( provider: Provider, epochTime: int, low: UInt256, high: UInt256 -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low)) let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high)) if abs(lowTimestamp.truncate(int) - epochTime) < @@ -39,7 +39,7 @@ proc binarySearchBlockNumberForEpoch( epochTime: UInt256, latestBlockNumber: UInt256, earliestBlockNumber: UInt256, -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = var low = earliestBlockNumber var high = latestBlockNumber @@ -65,7 +65,7 @@ proc binarySearchBlockNumberForEpoch( proc blockNumberForEpoch*( provider: Provider, epochTime: SecondsSince1970 -): 
Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = let epochTimeUInt256 = epochTime.u256 let (latestBlockNumber, latestBlockTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.latest) @@ -118,6 +118,6 @@ proc blockNumberForEpoch*( proc pastBlockTag*( provider: Provider, blocksAgo: int -): Future[BlockTag] {.async: (raises: [ProviderError]).} = +): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} = let head = await provider.getBlockNumber() return BlockTag.init(head - blocksAgo.abs.u256) diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 48947602..2b3811c3 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -6,8 +6,11 @@ import pkg/nimcrypto import pkg/ethers/fields import pkg/questionable/results import pkg/stew/byteutils +import pkg/libp2p/[cid, multicodec] import ../logutils import ../utils/json +import ../clock +from ../errors import mapFailure export contractabi @@ -16,25 +19,25 @@ type client* {.serialize.}: Address ask* {.serialize.}: StorageAsk content* {.serialize.}: StorageContent - expiry* {.serialize.}: UInt256 + expiry* {.serialize.}: uint64 nonce*: Nonce StorageAsk* = object - slots* {.serialize.}: uint64 - slotSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 + slots* {.serialize.}: uint64 + slotSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 maxSlotLoss* {.serialize.}: uint64 StorageContent* = object - cid* {.serialize.}: string + cid* {.serialize.}: Cid merkleRoot*: array[32, byte] Slot* = object request* {.serialize.}: StorageRequest - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 SlotId* = distinct array[32, byte] RequestId* = distinct array[32, byte] @@ -108,18 +111,21 @@ func fromTuple(_: type Slot, tupl: 
tuple): Slot = func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = StorageAsk( - slots: tupl[0], - slotSize: tupl[1], - duration: tupl[2], - proofProbability: tupl[3], - pricePerBytePerSecond: tupl[4], - collateralPerByte: tupl[5], + proofProbability: tupl[0], + pricePerBytePerSecond: tupl[1], + collateralPerByte: tupl[2], + slots: tupl[3], + slotSize: tupl[4], + duration: tupl[5], maxSlotLoss: tupl[6], ) func fromTuple(_: type StorageContent, tupl: tuple): StorageContent = StorageContent(cid: tupl[0], merkleRoot: tupl[1]) +func solidityType*(_: type Cid): string = + solidityType(seq[byte]) + func solidityType*(_: type StorageContent): string = solidityType(StorageContent.fieldTypes) @@ -129,6 +135,10 @@ func solidityType*(_: type StorageAsk): string = func solidityType*(_: type StorageRequest): string = solidityType(StorageRequest.fieldTypes) +# Note: it seems to be ok to ignore the vbuffer offset for now +func encode*(encoder: var AbiEncoder, cid: Cid) = + encoder.write(cid.data.buffer) + func encode*(encoder: var AbiEncoder, content: StorageContent) = encoder.write(content.fieldValues) @@ -141,8 +151,12 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) = func encode*(encoder: var AbiEncoder, request: StorageRequest) = encoder.write(request.fieldValues) -func encode*(encoder: var AbiEncoder, request: Slot) = - encoder.write(request.fieldValues) +func encode*(encoder: var AbiEncoder, slot: Slot) = + encoder.write(slot.fieldValues) + +func decode*(decoder: var AbiDecoder, T: type Cid): ?!T = + let data = ?decoder.read(seq[byte]) + Cid.init(data).mapFailure func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T = let tupl = ?decoder.read(StorageContent.fieldTypes) @@ -164,21 +178,21 @@ func id*(request: StorageRequest): RequestId = let encoding = AbiEncoder.encode((request,)) RequestId(keccak256.digest(encoding).data) -func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId = +func slotId*(requestId: RequestId, 
slotIndex: uint64): SlotId = let encoding = AbiEncoder.encode((requestId, slotIndex)) SlotId(keccak256.digest(encoding).data) -func slotId*(request: StorageRequest, slotIndex: UInt256): SlotId = +func slotId*(request: StorageRequest, slotIndex: uint64): SlotId = slotId(request.id, slotIndex) func id*(slot: Slot): SlotId = slotId(slot.request, slot.slotIndex) func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 = - ask.pricePerBytePerSecond * ask.slotSize + ask.pricePerBytePerSecond * ask.slotSize.u256 func pricePerSlot*(ask: StorageAsk): UInt256 = - ask.duration * ask.pricePerSlotPerSecond + ask.duration.u256 * ask.pricePerSlotPerSecond func totalPrice*(ask: StorageAsk): UInt256 = ask.slots.u256 * ask.pricePerSlot @@ -187,7 +201,7 @@ func totalPrice*(request: StorageRequest): UInt256 = request.ask.totalPrice func collateralPerSlot*(ask: StorageAsk): UInt256 = - ask.collateralPerByte * ask.slotSize + ask.collateralPerByte * ask.slotSize.u256 -func size*(ask: StorageAsk): UInt256 = - ask.slots.u256 * ask.slotSize +func size*(ask: StorageAsk): uint64 = + ask.slots * ask.slotSize diff --git a/codex/erasure/backend.nim b/codex/erasure/backend.nim index a6dd8b8c..32009829 100644 --- a/codex/erasure/backend.nim +++ b/codex/erasure/backend.nim @@ -29,14 +29,18 @@ method release*(self: ErasureBackend) {.base, gcsafe.} = raiseAssert("not implemented!") method encode*( - self: EncoderBackend, buffers, parity: var openArray[seq[byte]] + self: EncoderBackend, + buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen: int, ): Result[void, cstring] {.base, gcsafe.} = ## encode buffers using a backend ## raiseAssert("not implemented!") method decode*( - self: DecoderBackend, buffers, parity, recovered: var openArray[seq[byte]] + self: DecoderBackend, + buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen, recoveredLen: int, ): Result[void, cstring] {.base, gcsafe.} = ## decode buffers using a backend ## diff 
--git a/codex/erasure/backends/leopard.nim b/codex/erasure/backends/leopard.nim index c9f9db40..a0016570 100644 --- a/codex/erasure/backends/leopard.nim +++ b/codex/erasure/backends/leopard.nim @@ -10,7 +10,7 @@ import std/options import pkg/leopard -import pkg/stew/results +import pkg/results import ../backend @@ -22,11 +22,13 @@ type decoder*: Option[LeoDecoder] method encode*( - self: LeoEncoderBackend, data, parity: var openArray[seq[byte]] + self: LeoEncoderBackend, + data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen: int, ): Result[void, cstring] = ## Encode data using Leopard backend - if parity.len == 0: + if parityLen == 0: return ok() var encoder = @@ -36,10 +38,12 @@ method encode*( else: self.encoder.get() - encoder.encode(data, parity) + encoder.encode(data, parity, dataLen, parityLen) method decode*( - self: LeoDecoderBackend, data, parity, recovered: var openArray[seq[byte]] + self: LeoDecoderBackend, + data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen, recoveredLen: int, ): Result[void, cstring] = ## Decode data using given Leopard backend @@ -50,7 +54,7 @@ method decode*( else: self.decoder.get() - decoder.decode(data, parity, recovered) + decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen) method release*(self: LeoEncoderBackend) = if self.encoder.isSome: diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index aacd187a..107f85bc 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -12,12 +12,14 @@ import pkg/upraises push: {.upraises: [].} -import std/sequtils -import std/sugar +import std/[sugar, atomics, sequtils] import pkg/chronos +import pkg/chronos/threadsync +import pkg/chronicles import pkg/libp2p/[multicodec, cid, multihash] import pkg/libp2p/protobuf/minprotobuf +import pkg/taskpools import ../logutils import ../manifest @@ -28,6 +30,7 @@ import ../utils import ../utils/asynciter import ../indexingstrategy 
import ../errors +import ../utils/arrayutils import pkg/stew/byteutils @@ -68,6 +71,7 @@ type proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.} Erasure* = ref object + taskPool: Taskpool encoderProvider*: EncoderProvider decoderProvider*: DecoderProvider store*: BlockStore @@ -87,6 +91,24 @@ type # provided. minSize*: NBytes + EncodeTask = object + success: Atomic[bool] + erasure: ptr Erasure + blocks: ptr UncheckedArray[ptr UncheckedArray[byte]] + parity: ptr UncheckedArray[ptr UncheckedArray[byte]] + blockSize, blocksLen, parityLen: int + signal: ThreadSignalPtr + + DecodeTask = object + success: Atomic[bool] + erasure: ptr Erasure + blocks: ptr UncheckedArray[ptr UncheckedArray[byte]] + parity: ptr UncheckedArray[ptr UncheckedArray[byte]] + recovered: ptr UncheckedArray[ptr UncheckedArray[byte]] + blockSize, blocksLen: int + parityLen, recoveredLen: int + signal: ThreadSignalPtr + func indexToPos(steps, idx, step: int): int {.inline.} = ## Convert an index to a position in the encoded ## dataset @@ -269,6 +291,81 @@ proc init*( strategy: strategy, ) +proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} = + # Task suitable for running in taskpools - look, no GC! + let encoder = + task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) + defer: + encoder.release() + discard task[].signal.fireSync() + + if ( + let res = + encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen) + res.isErr + ): + warn "Error from leopard encoder backend!", error = $res.error + + task[].success.store(false) + else: + task[].success.store(true) + +proc encodeAsync*( + self: Erasure, + blockSize, blocksLen, parityLen: int, + data: ref seq[seq[byte]], + parity: ptr UncheckedArray[ptr UncheckedArray[byte]], +): Future[?!void] {.async: (raises: [CancelledError]).} = + without threadPtr =? 
ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + threadPtr.close().expect("closing once works") + + var blockData = createDoubleArray(blocksLen, blockSize) + + for i in 0 ..< data[].len: + copyMem(blockData[i], addr data[i][0], blockSize) + + defer: + freeDoubleArray(blockData, blocksLen) + + ## Create an ecode task with block data + var task = EncodeTask( + erasure: addr self, + blockSize: blockSize, + blocksLen: blocksLen, + parityLen: parityLen, + blocks: blockData, + parity: parity, + signal: threadPtr, + ) + + let t = addr task + + doAssert self.taskPool.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + self.taskPool.spawn leopardEncodeTask(self.taskPool, t) + let threadFut = threadPtr.wait() + + try: + await threadFut.join() + except CatchableError as exc: + try: + await threadFut + except AsyncError as asyncExc: + return failure(asyncExc.msg) + finally: + if exc of CancelledError: + raise (ref CancelledError) exc + else: + return failure(exc.msg) + + if not t.success.load(): + return failure("Leopard encoding failed") + + success() + proc encodeData( self: Erasure, manifest: Manifest, params: EncodingParams ): Future[?!Manifest] {.async.} = @@ -276,7 +373,6 @@ proc encodeData( ## ## `manifest` - the manifest to encode ## - logScope: steps = params.steps rounded_blocks = params.rounded @@ -286,7 +382,6 @@ proc encodeData( var cids = seq[Cid].new() - encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM) emptyBlock = newSeq[byte](manifest.blockSize.int) cids[].setLen(params.blocksCount) @@ -296,8 +391,7 @@ proc encodeData( # TODO: Don't allocate a new seq every time, allocate once and zero out var data = seq[seq[byte]].new() # number of blocks to encode - parityData = - newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int)) + parity = createDoubleArray(params.ecM, manifest.blockSize.int) data[].setLen(params.ecK) # TODO: this is a tight 
blocking loop so we sleep here to allow @@ -311,15 +405,25 @@ proc encodeData( trace "Unable to prepare data", error = err.msg return failure(err) - trace "Erasure coding data", data = data[].len, parity = parityData.len + trace "Erasure coding data", data = data[].len - if (let res = encoder.encode(data[], parityData); res.isErr): - trace "Unable to encode manifest!", error = $res.error - return failure($res.error) + try: + if err =? ( + await self.encodeAsync( + manifest.blockSize.int, params.ecK, params.ecM, data, parity + ) + ).errorOption: + return failure(err) + except CancelledError as exc: + raise exc + finally: + freeDoubleArray(parity, params.ecM) var idx = params.rounded + step for j in 0 ..< params.ecM: - without blk =? bt.Block.new(parityData[j]), error: + var innerPtr: ptr UncheckedArray[byte] = parity[][j] + without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)), + error: trace "Unable to create parity block", err = error.msg return failure(error) @@ -356,8 +460,6 @@ proc encodeData( except CatchableError as exc: trace "Erasure coding encoding error", exc = exc.msg return failure(exc) - finally: - encoder.release() proc encode*( self: Erasure, @@ -381,6 +483,101 @@ proc encode*( return success encodedManifest +proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = + # Task suitable for running in taskpools - look, no GC! 
+ let decoder = + task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) + defer: + decoder.release() + + if ( + let res = decoder.decode( + task[].blocks, + task[].parity, + task[].recovered, + task[].blocksLen, + task[].parityLen, + task[].recoveredLen, + ) + res.isErr + ): + warn "Error from leopard decoder backend!", error = $res.error + task[].success.store(false) + else: + task[].success.store(true) + + discard task[].signal.fireSync() + +proc decodeAsync*( + self: Erasure, + blockSize, blocksLen, parityLen: int, + blocks, parity: ref seq[seq[byte]], + recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], +): Future[?!void] {.async: (raises: [CancelledError]).} = + without threadPtr =? ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + threadPtr.close().expect("closing once works") + + var + blocksData = createDoubleArray(blocksLen, blockSize) + parityData = createDoubleArray(parityLen, blockSize) + + for i in 0 ..< blocks[].len: + if blocks[i].len > 0: + copyMem(blocksData[i], addr blocks[i][0], blockSize) + else: + blocksData[i] = nil + + for i in 0 ..< parity[].len: + if parity[i].len > 0: + copyMem(parityData[i], addr parity[i][0], blockSize) + else: + parityData[i] = nil + + defer: + freeDoubleArray(blocksData, blocksLen) + freeDoubleArray(parityData, parityLen) + + ## Create an decode task with block data + var task = DecodeTask( + erasure: addr self, + blockSize: blockSize, + blocksLen: blocksLen, + parityLen: parityLen, + recoveredLen: blocksLen, + blocks: blocksData, + parity: parityData, + recovered: recovered, + signal: threadPtr, + ) + + # Hold the task pointer until the signal is received + let t = addr task + doAssert self.taskPool.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + self.taskPool.spawn leopardDecodeTask(self.taskPool, t) + let threadFut = threadPtr.wait() + + try: + await threadFut.join() + except CatchableError as exc: + 
try: + await threadFut + except AsyncError as asyncExc: + return failure(asyncExc.msg) + finally: + if exc of CancelledError: + raise (ref CancelledError) exc + else: + return failure(exc.msg) + + if not t.success.load(): + return failure("Leopard encoding failed") + + success() + proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = ## Decode a protected manifest into it's original ## manifest @@ -388,7 +585,6 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = ## `encoded` - the encoded (protected) manifest to ## be recovered ## - logScope: steps = encoded.steps rounded_blocks = encoded.rounded @@ -411,8 +607,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = var data = seq[seq[byte]].new() parityData = seq[seq[byte]].new() - recovered = - newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int)) + recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int) data[].setLen(encoded.ecK) # set len to K parityData[].setLen(encoded.ecM) # set len to M @@ -430,15 +625,26 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = continue trace "Erasure decoding data" - - if (let err = decoder.decode(data[], parityData[], recovered); err.isErr): - trace "Unable to decode data!", err = $err.error - return failure($err.error) + try: + if err =? ( + await self.decodeAsync( + encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered + ) + ).errorOption: + return failure(err) + except CancelledError as exc: + raise exc + finally: + freeDoubleArray(recovered, encoded.ecK) for i in 0 ..< encoded.ecK: let idx = i * encoded.steps + step if data[i].len <= 0 and not cids[idx].isEmpty: - without blk =? bt.Block.new(recovered[i]), error: + var innerPtr: ptr UncheckedArray[byte] = recovered[][i] + + without blk =? 
bt.Block.new( + innerPtr.toOpenArray(0, encoded.blockSize.int - 1) + ), error: trace "Unable to create block!", exc = error.msg return failure(error) @@ -490,10 +696,13 @@ proc new*( store: BlockStore, encoderProvider: EncoderProvider, decoderProvider: DecoderProvider, + taskPool: Taskpool, ): Erasure = ## Create a new Erasure instance for encoding and decoding manifests ## - Erasure( - store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider + store: store, + encoderProvider: encoderProvider, + decoderProvider: decoderProvider, + taskPool: taskPool, ) diff --git a/codex/errors.nim b/codex/errors.nim index f7c2fa6b..75cefde4 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -9,7 +9,7 @@ import std/options -import pkg/stew/results +import pkg/results import pkg/chronos import pkg/questionable/results diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index 0c461e45..30e0c7ca 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -63,7 +63,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] = # optional ErasureInfo erasure = 7; # erasure coding info # optional filename: ?string = 8; # original filename # optional mimetype: ?string = 9; # original mimetype - # optional uploadedAt: ?int64 = 10; # original uploadedAt # } # ``` # @@ -102,9 +101,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] = if manifest.mimetype.isSome: header.write(9, manifest.mimetype.get()) - if manifest.uploadedAt.isSome: - header.write(10, manifest.uploadedAt.get().uint64) - pbNode.write(1, header) # set the treeCid as the data field pbNode.finish() @@ -135,7 +131,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = verifiableStrategy: uint32 filename: string mimetype: string - uploadedAt: uint64 # Decode `Header` message if pbNode.getField(1, pbHeader).isErr: @@ -169,9 +164,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = if pbHeader.getField(9, mimetype).isErr: return failure("Unable to 
decode `mimetype` from manifest!") - if pbHeader.getField(10, uploadedAt).isErr: - return failure("Unable to decode `uploadedAt` from manifest!") - let protected = pbErasureInfo.buffer.len > 0 var verifiable = false if protected: @@ -211,7 +203,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = var filenameOption = if filename.len == 0: string.none else: filename.some var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some - var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some let self = if protected: @@ -229,7 +220,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = strategy = StrategyType(protectedStrategy), filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption, ) else: Manifest.new( @@ -241,7 +231,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = codec = codec.MultiCodec, filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption, ) ?self.verify() diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index 6e0d1b80..cbb0bace 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -38,7 +38,6 @@ type Manifest* = ref object of RootObj version: CidVersion # Cid version filename {.serialize.}: ?string # The filename of the content uploaded (optional) mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional) - uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds case protected {.serialize.}: bool # Protected datasets have erasure coded info of true: ecK: int # Number of blocks to encode @@ -131,8 +130,6 @@ func filename*(self: Manifest): ?string = func mimetype*(self: Manifest): ?string = self.mimetype -func uploadedAt*(self: Manifest): ?int64 = - self.uploadedAt ############################################################ # Operations on block list ############################################################ @@ 
-165,14 +162,11 @@ func verify*(self: Manifest): ?!void = return success() -func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} = - self.treeCid.success - func `==`*(a, b: Manifest): bool = (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and - (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and ( + (a.mimetype == b.mimetype) and ( if a.protected: (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and (a.originalDatasetSize == b.originalDatasetSize) and @@ -202,9 +196,6 @@ func `$`*(self: Manifest): string = if self.mimetype.isSome: result &= ", mimetype: " & $self.mimetype - if self.uploadedAt.isSome: - result &= ", uploadedAt: " & $self.uploadedAt - result &= ( if self.protected: ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " & @@ -236,7 +227,6 @@ func new*( protected = false, filename: ?string = string.none, mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none, ): Manifest = T( treeCid: treeCid, @@ -248,7 +238,6 @@ func new*( protected: protected, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt, ) func new*( @@ -278,7 +267,6 @@ func new*( protectedStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*(T: type Manifest, manifest: Manifest): Manifest = @@ -296,7 +284,6 @@ func new*(T: type Manifest, manifest: Manifest): Manifest = protected: false, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*( @@ -314,7 +301,6 @@ func new*( strategy = SteppedStrategy, filename: ?string = string.none, mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none, ): Manifest = Manifest( treeCid: treeCid, @@ -331,7 +317,6 @@ func new*( 
protectedStrategy: strategy, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt, ) func new*( @@ -374,7 +359,6 @@ func new*( verifiableStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*(T: type Manifest, data: openArray[byte]): ?!Manifest = diff --git a/codex/market.nim b/codex/market.nim index bc325cd9..5417c8e1 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -20,13 +20,12 @@ type MarketError* = object of CodexError Subscription* = ref object of RootObj OnRequest* = - proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].} + proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnSlotFilled* = - proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} + OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnSlotReservationsFull* = - proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].} @@ -37,19 +36,19 @@ type StorageRequested* = object of MarketplaceEvent requestId*: RequestId ask*: StorageAsk - expiry*: UInt256 + expiry*: uint64 SlotFilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 SlotFreed* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 SlotReservationsFull* = object of MarketplaceEvent requestId* {.indexed.}: 
RequestId - slotIndex*: UInt256 + slotIndex*: uint64 RequestFulfilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId @@ -72,12 +71,15 @@ method getSigner*(market: Market): Future[Address] {.base, async.} = method periodicity*(market: Market): Future[Periodicity] {.base, async.} = raiseAssert("not implemented") -method proofTimeout*(market: Market): Future[UInt256] {.base, async.} = +method proofTimeout*(market: Market): Future[uint64] {.base, async.} = raiseAssert("not implemented") method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = raiseAssert("not implemented") +method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} = + raiseAssert("not implemented") + method proofDowntime*(market: Market): Future[uint8] {.base, async.} = raiseAssert("not implemented") @@ -122,7 +124,7 @@ method requestExpiresAt*( raiseAssert("not implemented") method getHost*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.base, async.} = raiseAssert("not implemented") @@ -137,7 +139,7 @@ method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, asy method fillSlot*( market: Market, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, ) {.base, async.} = @@ -177,12 +179,12 @@ method canProofBeMarkedAsMissing*( raiseAssert("not implemented") method reserveSlot*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ) {.base, async.} = raiseAssert("not implemented") method canReserveSlot*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.base, async.} = raiseAssert("not implemented") @@ -202,7 +204,7 @@ method subscribeSlotFilled*( raiseAssert("not implemented") method subscribeSlotFilled*( - market: Market, requestId: 
RequestId, slotIndex: UInt256, callback: OnSlotFilled + market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled ): Future[Subscription] {.base, async.} = raiseAssert("not implemented") diff --git a/codex/node.nim b/codex/node.nim index ee2a2b46..e1647f3e 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -15,6 +15,7 @@ import std/strformat import std/sugar import times +import pkg/taskpools import pkg/questionable import pkg/questionable/results import pkg/chronos @@ -70,6 +71,7 @@ type contracts*: Contracts clock*: Clock storage*: Contracts + taskpool: Taskpool CodexNodeRef* = ref CodexNode @@ -235,8 +237,9 @@ proc streamEntireDataset( # Retrieve, decode and save to the local store all EС groups proc erasureJob(): Future[?!void] {.async.} = # Spawn an erasure decoding job - let erasure = - Erasure.new(self.networkStore, leoEncoderProvider, leoDecoderProvider) + let erasure = Erasure.new( + self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) without _ =? (await erasure.decode(manifest)), error: error "Unable to erasure decode manifest", manifestCid, exc = error.msg return failure(error) @@ -267,6 +270,65 @@ proc retrieve*( await self.streamEntireDataset(manifest, cid) +proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = + if err =? (await self.networkStore.delBlock(cid)).errorOption: + error "Error deleting block", cid, err = err.msg + return failure(err) + + trace "Deleted block", cid + return success() + +proc deleteEntireDataset(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = + # Deletion is a strictly local operation + var store = self.networkStore.localStore + + if not (await cid in store): + # As per the contract for delete*, an absent dataset is not an error. + return success() + + without manifestBlock =? await store.getBlock(cid), err: + return failure(err) + + without manifest =? 
Manifest.decode(manifestBlock), err: + return failure(err) + + let runtimeQuota = initDuration(milliseconds = 100) + var lastIdle = getTime() + for i in 0 ..< manifest.blocksCount: + if (getTime() - lastIdle) >= runtimeQuota: + await idleAsync() + lastIdle = getTime() + + if err =? (await store.delBlock(manifest.treeCid, i)).errorOption: + # The contract for delBlock is fuzzy, but we assume that if the block is + # simply missing we won't get an error. This is a best effort operation and + # can simply be retried. + error "Failed to delete block within dataset", index = i, err = err.msg + return failure(err) + + if err =? (await store.delBlock(cid)).errorOption: + error "Error deleting manifest block", err = err.msg + + success() + +proc delete*( + self: CodexNodeRef, cid: Cid +): Future[?!void] {.async: (raises: [CatchableError]).} = + ## Deletes a whole dataset, if Cid is a Manifest Cid, or a single block, if Cid a block Cid, + ## from the underlying block store. This is a strictly local operation. + ## + ## Missing blocks in dataset deletes are ignored. + ## + + without isManifest =? cid.isManifest, err: + trace "Bad content type for CID:", cid = cid, err = err.msg + return failure(err) + + if not isManifest: + return await self.deleteSingleBlock(cid) + + await self.deleteEntireDataset(cid) + proc store*( self: CodexNodeRef, stream: LPStream, @@ -332,7 +394,6 @@ proc store*( codec = dataCodec, filename = filename, mimetype = mimetype, - uploadedAt = now().utc.toTime.toUnix.some, ) without manifestBlk =? 
await self.storeManifest(manifest), err: @@ -369,13 +430,13 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = proc setupRequest( self: CodexNodeRef, cid: Cid, - duration: UInt256, + duration: uint64, proofProbability: UInt256, nodes: uint, tolerance: uint, pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, - expiry: UInt256, + expiry: uint64, ): Future[?!StorageRequest] {.async.} = ## Setup slots for a given dataset ## @@ -403,8 +464,9 @@ proc setupRequest( return failure error # Erasure code the dataset according to provided parameters - let erasure = - Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider) + let erasure = Erasure.new( + self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) without encoded =? (await erasure.encode(manifest, ecK, ecM)), error: trace "Unable to erasure code dataset" @@ -432,17 +494,14 @@ proc setupRequest( request = StorageRequest( ask: StorageAsk( slots: verifiable.numSlots.uint64, - slotSize: builder.slotBytes.uint.u256, + slotSize: builder.slotBytes.uint64, duration: duration, proofProbability: proofProbability, pricePerBytePerSecond: pricePerBytePerSecond, collateralPerByte: collateralPerByte, maxSlotLoss: tolerance, ), - content: StorageContent( - cid: $manifestBlk.cid, # TODO: why string? - merkleRoot: verifyRoot, - ), + content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot), expiry: expiry, ) @@ -452,13 +511,13 @@ proc setupRequest( proc requestStorage*( self: CodexNodeRef, cid: Cid, - duration: UInt256, + duration: uint64, proofProbability: UInt256, nodes: uint, tolerance: uint, pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, - expiry: UInt256, + expiry: uint64, ): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. 
@@ -472,7 +531,7 @@ proc requestStorage*( pricePerBytePerSecond = pricePerBytePerSecond proofProbability = proofProbability collateralPerByte = collateralPerByte - expiry = expiry.truncate(int64) + expiry = expiry now = self.clock.now trace "Received a request for storage!" @@ -494,21 +553,19 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb + self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb ): Future[?!void] {.async.} = ## store data in local storage ## + let cid = request.content.cid + logScope: - cid = request.content.cid + cid = $cid slotIdx = slotIdx trace "Received a request to store a slot" - without cid =? Cid.init(request.content.cid).mapFailure, err: - trace "Unable to parse Cid", cid - return failure(err) - without manifest =? (await self.fetchManifest(cid)), err: trace "Unable to fetch manifest for cid", cid, err = err.msg return failure(err) @@ -518,11 +575,9 @@ proc onStore( trace "Unable to create slots builder", err = err.msg return failure(err) - let - slotIdx = slotIdx.truncate(int) - expiry = request.expiry.toSecondsSince1970 + let expiry = request.expiry - if slotIdx > manifest.slotRoots.high: + if slotIdx > manifest.slotRoots.high.uint64: trace "Slot index not in manifest", slotIdx return failure(newException(CodexError, "Slot index not in manifest")) @@ -530,7 +585,7 @@ proc onStore( trace "Updating expiry for blocks", blocks = blocks.len let ensureExpiryFutures = - blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) + blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970)) if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: return failure(updateExpiryErr) @@ -546,7 +601,11 @@ proc onStore( trace "Unable to create indexing strategy from protected manifest", err = err.msg return failure(err) - without blksIter =? 
indexer.getIndicies(slotIdx).catch, err: + if slotIdx > int.high.uint64: + error "Cannot cast slot index to int", slotIndex = slotIdx + return + + without blksIter =? indexer.getIndicies(slotIdx.int).catch, err: trace "Unable to get indicies from strategy", err = err.msg return failure(err) @@ -556,13 +615,13 @@ proc onStore( trace "Unable to fetch blocks", err = err.msg return failure(err) - without slotRoot =? (await builder.buildSlot(slotIdx.Natural)), err: + without slotRoot =? (await builder.buildSlot(slotIdx.int)), err: trace "Unable to build slot", err = err.msg return failure(err) trace "Slot successfully retrieved and reconstructed" - if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]: + if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]: trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() return failure(newException(CodexError, "Slot root mismatch")) @@ -578,8 +637,8 @@ proc onProve( ## let - cidStr = slot.request.content.cid - slotIdx = slot.slotIndex.truncate(Natural) + cidStr = $slot.request.content.cid + slotIdx = slot.slotIndex logScope: cid = cidStr @@ -600,7 +659,8 @@ proc onProve( return failure(err) when defined(verify_circuit): - without (inputs, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge), + err: error "Unable to generate proof", err = err.msg return failure(err) @@ -614,7 +674,7 @@ proc onProve( trace "Proof verified successfully" else: - without (_, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + without (_, proof) =? 
await prover.prove(slotIdx.int, manifest, challenge), err: error "Unable to generate proof", err = err.msg return failure(err) @@ -627,16 +687,11 @@ proc onProve( failure "Prover not enabled" proc onExpiryUpdate( - self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970 + self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = - without cid =? Cid.init(rootCid): - trace "Unable to parse Cid", cid - let error = newException(CodexError, "Unable to parse Cid") - return failure(error) + return await self.updateExpiry(rootCid, expiry) - return await self.updateExpiry(cid, expiry) - -proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) = +proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) = # TODO: remove data from local storage discard @@ -652,16 +707,16 @@ proc start*(self: CodexNodeRef) {.async.} = if hostContracts =? self.contracts.host: hostContracts.sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] = self.onStore(request, slot, onBatch) hostContracts.sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] = self.onExpiryUpdate(rootCid, expiry) - hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = + hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) = # TODO: remove data from local storage self.onClear(request, slotIndex) @@ -724,12 +779,16 @@ proc stop*(self: CodexNodeRef) {.async.} = if not self.networkStore.isNil: await self.networkStore.close + if not self.taskpool.isNil: + self.taskpool.shutdown() + proc new*( T: type CodexNodeRef, switch: Switch, networkStore: NetworkStore, engine: BlockExcEngine, discovery: Discovery, + taskpool: Taskpool, prover = Prover.none, contracts = Contracts.default, ): CodexNodeRef = @@ -742,5 +801,6 @@ 
proc new*( engine: engine, prover: prover, discovery: discovery, + taskPool: taskpool, contracts: contracts, ) diff --git a/codex/periods.nim b/codex/periods.nim index 429931ee..cbb860e2 100644 --- a/codex/periods.nim +++ b/codex/periods.nim @@ -2,10 +2,10 @@ import pkg/stint type Periodicity* = object - seconds*: UInt256 + seconds*: uint64 - Period* = UInt256 - Timestamp* = UInt256 + Period* = uint64 + Timestamp* = uint64 func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period = timestamp div periodicity.seconds diff --git a/codex/purchasing.nim b/codex/purchasing.nim index 4ab84405..25a35137 100644 --- a/codex/purchasing.nim +++ b/codex/purchasing.nim @@ -14,7 +14,7 @@ export purchase type Purchasing* = ref object - market: Market + market*: Market clock: Clock purchases: Table[PurchaseId, Purchase] proofProbability*: UInt256 diff --git a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index 760dc81a..5aeeceac 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -1,25 +1,35 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling +import ./error declareCounter(codex_purchases_cancelled, "codex purchases cancelled") logScope: topics = "marketplace purchases cancelled" -type PurchaseCancelled* = ref object of ErrorHandlingState +type PurchaseCancelled* = ref object of PurchaseState method `$`*(state: PurchaseCancelled): string = "cancelled" -method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseCancelled, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_cancelled.inc() let purchase = Purchase(machine) - warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + try: + warn "Request cancelled, withdrawing remaining funds", + requestId = 
purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) - let error = newException(Timeout, "Purchase cancelled due to timeout") - purchase.future.fail(error) + let error = newException(Timeout, "Purchase cancelled due to timeout") + purchase.future.fail(error) + except CancelledError as e: + trace "PurchaseCancelled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseCancelled.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index d7017b38..afa9f54f 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -14,7 +14,9 @@ type PurchaseErrored* = ref object of PurchaseState method `$`*(state: PurchaseErrored): string = "errored" -method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseErrored, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_error.inc() let purchase = Purchase(machine) diff --git a/codex/purchasing/states/errorhandling.nim b/codex/purchasing/states/errorhandling.nim deleted file mode 100644 index 8ef91ba6..00000000 --- a/codex/purchasing/states/errorhandling.nim +++ /dev/null @@ -1,8 +0,0 @@ -import pkg/questionable -import ../statemachine -import ./error - -type ErrorHandlingState* = ref object of PurchaseState - -method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = - some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index 5a126a73..1f6be74f 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -1,6 +1,7 @@ import pkg/metrics import ../statemachine import ../../logutils +import ../../utils/exceptions import ./error declareCounter(codex_purchases_failed, "codex purchases failed") @@ -10,11 +11,20 @@ type PurchaseFailed* = 
ref object of PurchaseState method `$`*(state: PurchaseFailed): string = "failed" -method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseFailed, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_failed.inc() let purchase = Purchase(machine) - warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + + try: + warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) + except CancelledError as e: + trace "PurchaseFailed.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseFailed.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) let error = newException(PurchaseError, "Purchase failed") return some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/finished.nim b/codex/purchasing/states/finished.nim index 6cf5ffcc..bb7a726d 100644 --- a/codex/purchasing/states/finished.nim +++ b/codex/purchasing/states/finished.nim @@ -1,7 +1,9 @@ import pkg/metrics import ../statemachine +import ../../utils/exceptions import ../../logutils +import ./error declareCounter(codex_purchases_finished, "codex purchases finished") @@ -13,10 +15,19 @@ type PurchaseFinished* = ref object of PurchaseState method `$`*(state: PurchaseFinished): string = "finished" -method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseFinished, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_finished.inc() let purchase = Purchase(machine) - info "Purchase finished, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + try: + info "Purchase finished, withdrawing remaining funds", + requestId = purchase.requestId + await 
purchase.market.withdrawFunds(purchase.requestId) - purchase.future.complete() + purchase.future.complete() + except CancelledError as e: + trace "PurchaseFinished.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseFinished.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/pending.nim b/codex/purchasing/states/pending.nim index 4852f266..1472a63e 100644 --- a/codex/purchasing/states/pending.nim +++ b/codex/purchasing/states/pending.nim @@ -1,18 +1,28 @@ import pkg/metrics +import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./submitted +import ./error declareCounter(codex_purchases_pending, "codex purchases pending") -type PurchasePending* = ref object of ErrorHandlingState +type PurchasePending* = ref object of PurchaseState method `$`*(state: PurchasePending): string = "pending" -method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchasePending, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_pending.inc() let purchase = Purchase(machine) - let request = !purchase.request - await purchase.market.requestStorage(request) - return some State(PurchaseSubmitted()) + try: + let request = !purchase.request + await purchase.market.requestStorage(request) + return some State(PurchaseSubmitted()) + except CancelledError as e: + trace "PurchasePending.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchasePending.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 083e64c8..e93d7013 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -1,22 +1,25 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import 
../statemachine -import ./errorhandling import ./finished import ./failed +import ./error declareCounter(codex_purchases_started, "codex purchases started") logScope: topics = "marketplace purchases started" -type PurchaseStarted* = ref object of ErrorHandlingState +type PurchaseStarted* = ref object of PurchaseState method `$`*(state: PurchaseStarted): string = "started" -method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseStarted, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_started.inc() let purchase = Purchase(machine) @@ -28,15 +31,24 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} proc callback(_: RequestId) = failed.complete() - let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) + var ended: Future[void] + try: + let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) - # Ensure that we're past the request end by waiting an additional second - let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) - let fut = await one(ended, failed) - await subscription.unsubscribe() - if fut.id == failed.id: + # Ensure that we're past the request end by waiting an additional second + ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) + let fut = await one(ended, failed) + await subscription.unsubscribe() + if fut.id == failed.id: + ended.cancelSoon() + return some State(PurchaseFailed()) + else: + failed.cancelSoon() + return some State(PurchaseFinished()) + except CancelledError as e: ended.cancelSoon() - return some State(PurchaseFailed()) - else: failed.cancelSoon() - return some State(PurchaseFinished()) + trace "PurchaseStarted.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseStarted.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git 
a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 1cf65b1f..dd3669e4 100644 --- a/codex/purchasing/states/submitted.nim +++ b/codex/purchasing/states/submitted.nim @@ -1,22 +1,25 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./started import ./cancelled +import ./error logScope: topics = "marketplace purchases submitted" declareCounter(codex_purchases_submitted, "codex purchases submitted") -type PurchaseSubmitted* = ref object of ErrorHandlingState +type PurchaseSubmitted* = ref object of PurchaseState method `$`*(state: PurchaseSubmitted): string = "submitted" -method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseSubmitted, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_submitted.inc() let purchase = Purchase(machine) let request = !purchase.request @@ -44,5 +47,10 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async. 
await wait().withTimeout() except Timeout: return some State(PurchaseCancelled()) + except CancelledError as e: + trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseSubmitted.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) return some State(PurchaseStarted()) diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index 54e09942..8c2bff48 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -1,34 +1,44 @@ import pkg/metrics +import ../../utils/exceptions +import ../../logutils import ../statemachine -import ./errorhandling import ./submitted import ./started import ./cancelled import ./finished import ./failed +import ./error declareCounter(codex_purchases_unknown, "codex purchases unknown") -type PurchaseUnknown* = ref object of ErrorHandlingState +type PurchaseUnknown* = ref object of PurchaseState method `$`*(state: PurchaseUnknown): string = "unknown" -method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} = - codex_purchases_unknown.inc() - let purchase = Purchase(machine) - if (request =? await purchase.market.getRequest(purchase.requestId)) and - (requestState =? await purchase.market.requestState(purchase.requestId)): - purchase.request = some request +method run*( + state: PurchaseUnknown, machine: Machine +): Future[?State] {.async: (raises: []).} = + try: + codex_purchases_unknown.inc() + let purchase = Purchase(machine) + if (request =? await purchase.market.getRequest(purchase.requestId)) and + (requestState =? 
await purchase.market.requestState(purchase.requestId)): + purchase.request = some request - case requestState - of RequestState.New: - return some State(PurchaseSubmitted()) - of RequestState.Started: - return some State(PurchaseStarted()) - of RequestState.Cancelled: - return some State(PurchaseCancelled()) - of RequestState.Finished: - return some State(PurchaseFinished()) - of RequestState.Failed: - return some State(PurchaseFailed()) + case requestState + of RequestState.New: + return some State(PurchaseSubmitted()) + of RequestState.Started: + return some State(PurchaseStarted()) + of RequestState.Cancelled: + return some State(PurchaseCancelled()) + of RequestState.Finished: + return some State(PurchaseFinished()) + of RequestState.Failed: + return some State(PurchaseFailed()) + except CancelledError as e: + trace "PurchaseUnknown.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseUnknown.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index a64d26cf..e5c8d195 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -238,6 +238,15 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute let json = await formatManifestBlocks(node) return RestApiResponse.response($json, contentType = "application/json") + router.api(MethodOptions, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? 
allowedOrigin: + resp.setCorsHeaders("GET,DELETE", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + router.api(MethodGet, "/api/codex/v1/data/{cid}") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: @@ -254,6 +263,27 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute await node.retrieveCid(cid.get(), local = true, resp = resp) + router.api(MethodDelete, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Deletes either a single block or an entire dataset + ## from the local node. Does nothing and returns 200 + ## if the dataset is not locally available. + ## + var headers = buildCorsHeaders("DELETE", allowedOrigin) + + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) + + if err =? (await node.delete(cid.get())).errorOption: + return RestApiResponse.error(Http500, err.msg, headers = headers) + + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("DELETE", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: @@ -433,7 +463,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = Http400, "Total size must be larger then zero", headers = headers ) - if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): + if not reservations.hasAvailable(restAv.totalSize): return RestApiResponse.error(Http422, "Not enough storage quota", headers = headers) @@ -607,6 +637,14 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = without params =? 
StorageRequestParams.fromJson(body), error: return RestApiResponse.error(Http400, error.msg, headers = headers) + let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit + if params.duration > requestDurationLimit: + return RestApiResponse.error( + Http400, + "Duration exceeds limit of " & $requestDurationLimit & " seconds", + headers = headers, + ) + let nodes = params.nodes |? 3 let tolerance = params.tolerance |? 1 diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim index 1c997ccf..319ce3d6 100644 --- a/codex/rest/coders.nim +++ b/codex/rest/coders.nim @@ -14,7 +14,7 @@ import pkg/chronos import pkg/libp2p import pkg/stew/base10 import pkg/stew/byteutils -import pkg/stew/results +import pkg/results import pkg/stint import ../sales diff --git a/codex/rest/json.nim b/codex/rest/json.nim index 9bc7664e..c221ba73 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -13,11 +13,11 @@ export json type StorageRequestParams* = object - duration* {.serialize.}: UInt256 + duration* {.serialize.}: uint64 proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 - expiry* {.serialize.}: ?UInt256 + expiry* {.serialize.}: ?uint64 nodes* {.serialize.}: ?uint tolerance* {.serialize.}: ?uint @@ -28,16 +28,16 @@ type error* {.serialize.}: ?string RestAvailability* = object - totalSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 + totalSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral* {.serialize.}: UInt256 - freeSize* {.serialize.}: ?UInt256 + freeSize* {.serialize.}: ?uint64 RestSalesAgent* = object state* {.serialize.}: string requestId* {.serialize.}: RequestId - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 request* {.serialize.}: ?StorageRequest reservation* {.serialize.}: ?Reservation diff --git a/codex/sales.nim b/codex/sales.nim index 
4bf2d13c..91d882b8 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -150,16 +150,16 @@ proc cleanUp( ).errorOption: error "failure deleting reservation", error = deleteErr.msg + if data.slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16", slotIndex = data.slotIndex + return + # Re-add items back into the queue to prevent small availabilities from # draining the queue. Seen items will be ordered last. if reprocessSlot and request =? data.request: let queue = sales.context.slotQueue var seenItem = SlotQueueItem.init( - data.requestId, - data.slotIndex.truncate(uint16), - data.ask, - request.expiry, - seen = true, + data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true ) trace "pushing ignored item to queue, marked as seen" if err =? queue.push(seenItem).errorOption: @@ -172,7 +172,7 @@ proc cleanUp( processing.complete() proc filled( - sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void] + sales: Sales, request: StorageRequest, slotIndex: uint64, processing: Future[void] ) = if onSale =? 
sales.context.onSale: onSale(request, slotIndex) @@ -184,16 +184,15 @@ proc filled( proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex - let agent = newSalesAgent( - sales.context, item.requestId, item.slotIndex.u256, none StorageRequest - ) + let agent = + newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest) agent.onCleanUp = proc( returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done) - agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) = + agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) = sales.filled(request, slotIndex, done) agent.start(SalePreparing()) @@ -283,7 +282,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = queue.unpause() proc onStorageRequested( - sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256 + sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64 ) = logScope: topics = "marketplace sales onStorageRequested" @@ -312,7 +311,7 @@ proc onStorageRequested( else: warn "Error adding request to SlotQueue", error = err.msg -proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = +proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = logScope: topics = "marketplace sales onSlotFreed" requestId @@ -325,8 +324,12 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = let market = context.market let queue = context.slotQueue - # first attempt to populate request using existing slot metadata in queue - without var found =? 
queue.populateItem(requestId, slotIndex.truncate(uint16)): + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + + # first attempt to populate request using existing metadata in queue + without var found =? queue.populateItem(requestId, slotIndex.uint16): trace "no existing request metadata, getting request info from contract" # if there's no existing slot for that request, retrieve the request # from the contract. @@ -335,7 +338,7 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = error "unknown request in contract" return - found = SlotQueueItem.init(request, slotIndex.truncate(uint16)) + found = SlotQueueItem.init(request, slotIndex.uint16) except CancelledError: discard # do not propagate as addSlotToQueue was asyncSpawned except CatchableError as e: @@ -353,7 +356,7 @@ proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) = + proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) = sales.onStorageRequested(requestId, ask, expiry) try: @@ -426,9 +429,13 @@ proc subscribeSlotFilled(sales: Sales) {.async.} = let market = context.market let queue = context.slotQueue - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + trace "slot filled, removing from slot queue", requestId, slotIndex - queue.delete(requestId, slotIndex.truncate(uint16)) + queue.delete(requestId, slotIndex.uint16) for agent in sales.agents: agent.onSlotFilled(requestId, slotIndex) @@ -445,7 +452,7 @@ proc subscribeSlotFreed(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) = + proc 
onSlotFreed(requestId: RequestId, slotIndex: uint64) = sales.onSlotFreed(requestId, slotIndex) try: @@ -461,9 +468,13 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} = let market = context.market let queue = context.slotQueue - proc onSlotReservationsFull(requestId: RequestId, slotIndex: UInt256) = + proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) = + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + trace "reservations for slot full, removing from slot queue", requestId, slotIndex - queue.delete(requestId, slotIndex.truncate(uint16)) + queue.delete(requestId, slotIndex.uint16) try: let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull) diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index 4f48e057..a64cb602 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -64,9 +64,9 @@ type SomeStorableId = AvailabilityId | ReservationId Availability* = ref object id* {.serialize.}: AvailabilityId - totalSize* {.serialize.}: UInt256 - freeSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 + totalSize* {.serialize.}: uint64 + freeSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral {.serialize.}: UInt256 totalRemainingCollateral* {.serialize.}: UInt256 @@ -74,9 +74,9 @@ type Reservation* = ref object id* {.serialize.}: ReservationId availabilityId* {.serialize.}: AvailabilityId - size* {.serialize.}: UInt256 + size* {.serialize.}: uint64 requestId* {.serialize.}: RequestId - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 Reservations* = ref object of RootObj availabilityLock: AsyncLock @@ -123,9 +123,9 @@ proc new*(T: type Reservations, repo: RepoStore): Reservations = proc init*( _: type Availability, - totalSize: UInt256, - freeSize: UInt256, - duration: UInt256, + totalSize: uint64, + freeSize: 
uint64, + duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, ): Availability = @@ -151,9 +151,9 @@ proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} = proc init*( _: type Reservation, availabilityId: AvailabilityId, - size: UInt256, + size: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, ): Reservation = var id: array[32, byte] doAssert randomBytes(id) == 32 @@ -206,7 +206,7 @@ func key*(availability: Availability): ?!Key = return availability.id.key func maxCollateralPerByte*(availability: Availability): UInt256 = - return availability.totalRemainingCollateral div availability.freeSize + return availability.totalRemainingCollateral div availability.freeSize.stuint(256) func key*(reservation: Reservation): ?!Key = return key(reservation.id, reservation.availabilityId) @@ -289,16 +289,12 @@ proc updateAvailability( trace "totalSize changed, updating repo reservation" if oldAvailability.totalSize < obj.totalSize: # storage added if reserveErr =? ( - await self.repo.reserve( - (obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes - ) + await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes) ).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) elif oldAvailability.totalSize > obj.totalSize: # storage removed if reserveErr =? 
( - await self.repo.release( - (oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes - ) + await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes) ).errorOption: return failure(reserveErr.toErr(ReleaseFailedError)) @@ -361,7 +357,7 @@ proc deleteReservation*( else: return failure(error) - if reservation.size > 0.u256: + if reservation.size > 0.uint64: trace "returning remaining reservation bytes to availability", size = reservation.size @@ -389,8 +385,8 @@ proc deleteReservation*( proc createAvailability*( self: Reservations, - size: UInt256, - duration: UInt256, + size: uint64, + duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, ): Future[?!Availability] {.async.} = @@ -399,7 +395,7 @@ proc createAvailability*( let availability = Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral) - let bytes = availability.freeSize.truncate(uint) + let bytes = availability.freeSize if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) @@ -418,9 +414,9 @@ proc createAvailability*( method createReservation*( self: Reservations, availabilityId: AvailabilityId, - slotSize: UInt256, + slotSize: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, collateralPerByte: UInt256, ): Future[?!Reservation] {.async, base.} = withLock(self.availabilityLock): @@ -450,7 +446,7 @@ method createReservation*( availability.freeSize -= slotSize # adjust the remaining totalRemainingCollateral - availability.totalRemainingCollateral -= slotSize * collateralPerByte + availability.totalRemainingCollateral -= slotSize.stuint(256) * collateralPerByte # update availability with reduced size trace "Updating availability with reduced size" @@ -475,7 +471,7 @@ proc returnBytesToAvailability*( self: Reservations, availabilityId: AvailabilityId, reservationId: ReservationId, - bytes: UInt256, + bytes: uint64, ): Future[?!void] 
{.async.} = logScope: reservationId @@ -502,8 +498,7 @@ proc returnBytesToAvailability*( # First lets see if we can re-reserve the bytes, if the Repo's quota # is depleted then we will fail-fast as there is nothing to be done atm. - if reserveErr =? - (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) without availabilityKey =? availabilityId.key, error: @@ -517,8 +512,7 @@ proc returnBytesToAvailability*( # Update availability with returned size if updateErr =? (await self.updateAvailability(availability)).errorOption: trace "Rolling back returning bytes" - if rollbackErr =? - (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption: rollbackErr.parent = updateErr return failure(rollbackErr) @@ -546,7 +540,7 @@ proc release*( without var reservation =? (await self.get(key, Reservation)), error: return failure(error) - if reservation.size < bytes.u256: + if reservation.size < bytes: let error = newException( BytesOutOfBoundsError, "trying to release an amount of bytes that is greater than the total size of the Reservation", @@ -556,7 +550,7 @@ proc release*( if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption: return failure(releaseErr.toErr(ReleaseFailedError)) - reservation.size -= bytes.u256 + reservation.size -= bytes # persist partially used Reservation with updated size if err =? (await self.update(reservation)).errorOption: @@ -643,7 +637,8 @@ proc all*( proc findAvailability*( self: Reservations, - size, duration, pricePerBytePerSecond, collateralPerByte: UInt256, + size, duration: uint64, + pricePerBytePerSecond, collateralPerByte: UInt256, ): Future[?Availability] {.async.} = without storables =? 
(await self.storables(Availability)), e: error "failed to get all storables", error = e.msg diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index 8a8e5dc0..e6328a83 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -6,6 +6,7 @@ import pkg/upraises import ../contracts/requests import ../errors import ../logutils +import ../utils/exceptions import ./statemachine import ./salescontext import ./salesdata @@ -28,7 +29,7 @@ type OnCleanUp* = proc( returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none ): Future[void] {.gcsafe, upraises: [].} - OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} SalesAgentError = object of CodexError AllSlotsFilledError* = object of SalesAgentError @@ -39,7 +40,7 @@ func `==`*(a, b: SalesAgent): bool = proc newSalesAgent*( context: SalesContext, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, request: ?StorageRequest, ): SalesAgent = var agent = SalesAgent.new() @@ -68,41 +69,48 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = let data = agent.data let clock = agent.context.clock - proc onCancelled() {.async.} = + proc onCancelled() {.async: (raises: []).} = without request =? data.request: return - let market = agent.context.market - let expiry = await market.requestExpiresAt(data.requestId) + try: + let market = agent.context.market + let expiry = await market.requestExpiresAt(data.requestId) - while true: - let deadline = max(clock.now, expiry) + 1 - trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline - await clock.waitUntil(deadline) + while true: + let deadline = max(clock.now, expiry) + 1 + trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline + await clock.waitUntil(deadline) - without state =? 
await agent.retrieveRequestState(): - error "Uknown request", requestId = data.requestId - return + without state =? await agent.retrieveRequestState(): + error "Unknown request", requestId = data.requestId + return - case state - of New: - discard - of RequestState.Cancelled: - agent.schedule(cancelledEvent(request)) - break - of RequestState.Started, RequestState.Finished, RequestState.Failed: - break + case state + of New: + discard + of RequestState.Cancelled: + agent.schedule(cancelledEvent(request)) + break + of RequestState.Started, RequestState.Finished, RequestState.Failed: + break - debug "The request is not yet canceled, even though it should be. Waiting for some more time.", - currentState = state, now = clock.now + debug "The request is not yet canceled, even though it should be. Waiting for some more time.", + currentState = state, now = clock.now + except CancelledError: + trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId + except CatchableError as e: + error "Error while waiting for expiry to lapse", error = e.msgDetail data.cancelled = onCancelled() + asyncSpawn data.cancelled method onFulfilled*( agent: SalesAgent, requestId: RequestId ) {.base, gcsafe, upraises: [].} = - if agent.data.requestId == requestId and not agent.data.cancelled.isNil: - agent.data.cancelled.cancelSoon() + let cancelled = agent.data.cancelled + if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished: + cancelled.cancelSoon() method onFailed*( agent: SalesAgent, requestId: RequestId @@ -113,7 +121,7 @@ method onFailed*( agent.schedule(failedEvent(request)) method onSlotFilled*( - agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 + agent: SalesAgent, requestId: RequestId, slotIndex: uint64 ) {.base, gcsafe, upraises: [].} = if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: agent.schedule(slotFilledEvent(requestId, slotIndex)) diff --git a/codex/sales/salescontext.nim 
b/codex/sales/salescontext.nim index bb0b5dc9..6e6a3568 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -1,6 +1,7 @@ import pkg/questionable import pkg/questionable/results import pkg/upraises +import pkg/libp2p/cid import ../market import ../clock @@ -25,13 +26,13 @@ type BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} OnStore* = proc( - request: StorageRequest, slot: UInt256, blocksCb: BlocksCb + request: StorageRequest, slot: uint64, blocksCb: BlocksCb ): Future[?!void] {.gcsafe, upraises: [].} OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. gcsafe, upraises: [] .} - OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {. + OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {. gcsafe, upraises: [] .} - OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} + OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim index 995c7a4b..de8eccb5 100644 --- a/codex/sales/salesdata.nim +++ b/codex/sales/salesdata.nim @@ -7,6 +7,6 @@ type SalesData* = ref object requestId*: RequestId ask*: StorageAsk request*: ?StorageRequest - slotIndex*: UInt256 + slotIndex*: uint64 cancelled*: Future[void] reservation*: ?Reservation diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index 332ec9e0..a032d46b 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -5,6 +5,7 @@ import pkg/questionable import pkg/questionable/results import pkg/upraises import ../errors +import ../clock import ../logutils import ../rng import ../utils @@ -30,11 +31,11 @@ type SlotQueueItem* = object requestId: RequestId 
slotIndex: uint16 - slotSize: UInt256 - duration: UInt256 + slotSize: uint64 + duration: uint64 pricePerBytePerSecond: UInt256 collateralPerByte: UInt256 - expiry: UInt256 + expiry: uint64 seen: bool # don't need to -1 to prevent overflow when adding 1 (to always allow push) @@ -135,7 +136,7 @@ proc init*( requestId: RequestId, slotIndex: uint16, ask: StorageAsk, - expiry: UInt256, + expiry: uint64, seen = false, ): SlotQueueItem = SlotQueueItem( @@ -155,7 +156,7 @@ proc init*( SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) proc init*( - _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256 + _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64 ): seq[SlotQueueItem] = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") @@ -182,10 +183,10 @@ proc requestId*(self: SlotQueueItem): RequestId = proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex -proc slotSize*(self: SlotQueueItem): UInt256 = +proc slotSize*(self: SlotQueueItem): uint64 = self.slotSize -proc duration*(self: SlotQueueItem): UInt256 = +proc duration*(self: SlotQueueItem): uint64 = self.duration proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 = diff --git a/codex/sales/statemachine.nim b/codex/sales/statemachine.nim index 6d3c7101..ec770ece 100644 --- a/codex/sales/statemachine.nim +++ b/codex/sales/statemachine.nim @@ -25,7 +25,7 @@ method onFailed*( discard method onSlotFilled*( - state: SaleState, requestId: RequestId, slotIndex: UInt256 + state: SaleState, requestId: RequestId, slotIndex: uint64 ): ?State {.base, upraises: [].} = discard @@ -37,6 +37,6 @@ proc failedEvent*(request: StorageRequest): Event = return proc(state: State): ?State = SaleState(state).onFailed(request) -proc slotFilledEvent*(requestId: RequestId, slotIndex: UInt256): Event = +proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event = return proc(state: State): ?State = 
SaleState(state).onSlotFilled(requestId, slotIndex) diff --git a/codex/sales/states/cancelled.nim b/codex/sales/states/cancelled.nim index 3bb92a2c..3bdf8c2f 100644 --- a/codex/sales/states/cancelled.nim +++ b/codex/sales/states/cancelled.nim @@ -1,17 +1,20 @@ import ../../logutils +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling +import ./errored logScope: topics = "marketplace sales cancelled" -type SaleCancelled* = ref object of ErrorHandlingState +type SaleCancelled* = ref object of SaleState method `$`*(state: SaleCancelled): string = "SaleCancelled" -method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleCancelled, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let market = agent.context.market @@ -19,21 +22,27 @@ method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} = without request =? data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting collateral and partial payout", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Collecting collateral and partial payout", + requestId = data.requestId, slotIndex = data.slotIndex + let currentCollateral = await market.currentCollateral(slot.id) + await market.freeSlot(slot.id) - if onClear =? agent.context.onClear and request =? data.request: - onClear(request, data.slotIndex) + if onClear =? agent.context.onClear and request =? data.request: + onClear(request, data.slotIndex) - if onCleanUp =? agent.onCleanUp: - await onCleanUp( - returnBytes = true, - reprocessSlot = false, - returnedCollateral = some currentCollateral, - ) + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp( + returnBytes = true, + reprocessSlot = false, + returnedCollateral = some currentCollateral, + ) - warn "Sale cancelled due to timeout", - requestId = data.requestId, slotIndex = data.slotIndex + warn "Sale cancelled due to timeout", + requestId = data.requestId, slotIndex = data.slotIndex + except CancelledError as e: + trace "SaleCancelled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleCancelled.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/downloading.nim b/codex/sales/states/downloading.nim index f6ced6be..39137545 100644 --- a/codex/sales/states/downloading.nim +++ b/codex/sales/states/downloading.nim @@ -4,16 +4,16 @@ import pkg/questionable/results import ../../blocktype as bt import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./filled import ./initialproving import ./errored -type SaleDownloading* = ref object of ErrorHandlingState +type SaleDownloading* = ref object of SaleState logScope: topics = "marketplace sales downloading" @@ -28,11 +28,13 @@ method onFailed*(state: SaleDownloading, request: StorageRequest): ?State = return some State(SaleFailed()) method onSlotFilled*( - state: SaleDownloading, requestId: RequestId, slotIndex: UInt256 + state: SaleDownloading, requestId: RequestId, slotIndex: uint64 ): ?State = return some State(SaleFilled()) -method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleDownloading, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -64,9 +66,15 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} trace "Releasing batch of bytes written to disk", bytes return await 
reservations.release(reservation.id, reservation.availabilityId, bytes) - trace "Starting download" - if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: - return some State(SaleErrored(error: err, reprocessSlot: false)) + try: + trace "Starting download" + if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: + return some State(SaleErrored(error: err, reprocessSlot: false)) - trace "Download complete" - return some State(SaleInitialProving()) + trace "Download complete" + return some State(SaleInitialProving()) + except CancelledError as e: + trace "SaleDownloading.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleDownloading.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/errored.nim b/codex/sales/states/errored.nim index b85b7930..77bf08d3 100644 --- a/codex/sales/states/errored.nim +++ b/codex/sales/states/errored.nim @@ -17,10 +17,9 @@ type SaleErrored* = ref object of SaleState method `$`*(state: SaleErrored): string = "SaleErrored" -method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} = - error "error during SaleErrored run", error = err.msg - -method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleErrored, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -30,8 +29,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} = requestId = data.requestId, slotIndex = data.slotIndex - if onClear =? context.onClear and request =? data.request: - onClear(request, data.slotIndex) + try: + if onClear =? context.onClear and request =? data.request: + onClear(request, data.slotIndex) - if onCleanUp =? agent.onCleanUp: - await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot) + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot) + except CancelledError as e: + trace "SaleErrored.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleErrored.run", error = e.msgDetail diff --git a/codex/sales/states/errorhandling.nim b/codex/sales/states/errorhandling.nim deleted file mode 100644 index 2ee399ef..00000000 --- a/codex/sales/states/errorhandling.nim +++ /dev/null @@ -1,8 +0,0 @@ -import pkg/questionable -import ../statemachine -import ./errored - -type ErrorHandlingState* = ref object of SaleState - -method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = - some State(SaleErrored(error: error)) diff --git a/codex/sales/states/failed.nim b/codex/sales/states/failed.nim index 6103765c..b0d6a7cd 100644 --- a/codex/sales/states/failed.nim +++ b/codex/sales/states/failed.nim @@ -1,30 +1,38 @@ import ../../logutils +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./errored logScope: topics = "marketplace sales failed" type - SaleFailed* = ref object of ErrorHandlingState + SaleFailed* = ref object of SaleState SaleFailedError* = object of SaleError method `$`*(state: SaleFailed): string = "SaleFailed" -method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFailed, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market without request =?
data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Removing slot from mySlots", - requestId = data.requestId, slotIndex = data.slotIndex - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Removing slot from mySlots", + requestId = data.requestId, slotIndex = data.slotIndex + await market.freeSlot(slot.id) - let error = newException(SaleFailedError, "Sale failed") - return some State(SaleErrored(error: error)) + let error = newException(SaleFailedError, "Sale failed") + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleFailed.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFailed.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/filled.nim b/codex/sales/states/filled.nim index 9e7d9906..b0fc65c9 100644 --- a/codex/sales/states/filled.nim +++ b/codex/sales/states/filled.nim @@ -3,9 +3,9 @@ import pkg/questionable/results import ../../conf import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./errored import ./cancelled import ./failed @@ -18,7 +18,7 @@ logScope: topics = "marketplace sales filled" type - SaleFilled* = ref object of ErrorHandlingState + SaleFilled* = ref object of SaleState HostMismatchError* = object of CatchableError method onCancelled*(state: SaleFilled, request: StorageRequest): ?State = @@ -30,40 +30,48 @@ method onFailed*(state: SaleFilled, request: StorageRequest): ?State = method `$`*(state: SaleFilled): string = "SaleFilled" -method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFilled, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context - let market = context.market - 
let host = await market.getHost(data.requestId, data.slotIndex) - let me = await market.getSigner() - if host == me.some: - info "Slot succesfully filled", - requestId = data.requestId, slotIndex = data.slotIndex + try: + let host = await market.getHost(data.requestId, data.slotIndex) + let me = await market.getSigner() - without request =? data.request: - raiseAssert "no sale request" + if host == me.some: + info "Slot succesfully filled", + requestId = data.requestId, slotIndex = data.slotIndex - if onFilled =? agent.onFilled: - onFilled(request, data.slotIndex) + without request =? data.request: + raiseAssert "no sale request" - without onExpiryUpdate =? context.onExpiryUpdate: - raiseAssert "onExpiryUpdate callback not set" + if onFilled =? agent.onFilled: + onFilled(request, data.slotIndex) - let requestEnd = await market.getRequestEnd(data.requestId) - if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: - return some State(SaleErrored(error: err)) + without onExpiryUpdate =? context.onExpiryUpdate: + raiseAssert "onExpiryUpdate callback not set" - when codex_enable_proof_failures: - if context.simulateProofFailures > 0: - info "Proving with failure rate", rate = context.simulateProofFailures - return some State( - SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) - ) + let requestEnd = await market.getRequestEnd(data.requestId) + if err =? 
(await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: + return some State(SaleErrored(error: err)) - return some State(SaleProving()) - else: - let error = newException(HostMismatchError, "Slot filled by other host") - return some State(SaleErrored(error: error)) + when codex_enable_proof_failures: + if context.simulateProofFailures > 0: + info "Proving with failure rate", rate = context.simulateProofFailures + return some State( + SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) + ) + + return some State(SaleProving()) + else: + let error = newException(HostMismatchError, "Slot filled by other host") + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleFilled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilled.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 1934fc12..0c20a64e 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -1,9 +1,9 @@ import pkg/stint import ../../logutils import ../../market +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./filled import ./cancelled import ./failed @@ -13,7 +13,7 @@ import ./errored logScope: topics = "marketplace sales filling" -type SaleFilling* = ref object of ErrorHandlingState +type SaleFilling* = ref object of SaleState proof*: Groth16Proof method `$`*(state: SaleFilling): string = @@ -25,7 +25,9 @@ method onCancelled*(state: SaleFilling, request: StorageRequest): ?State = method onFailed*(state: SaleFilling, request: StorageRequest): ?State = return some State(SaleFailed()) -method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFilling, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = 
SalesAgent(machine).context.market without (request =? data.request): @@ -35,28 +37,34 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = requestId = data.requestId slotIndex = data.slotIndex - let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) - let requestedCollateral = request.ask.collateralPerSlot - var collateral: UInt256 - - if slotState == SlotState.Repair: - # When repairing the node gets "discount" on the collateral that it needs to - let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = - requestedCollateral - - ((requestedCollateral * repairRewardPercentage)).div(100.u256) - else: - collateral = requestedCollateral - - debug "Filling slot" try: - await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) - except MarketError as e: - if e.msg.contains "Slot is not free": - debug "Slot is already filled, ignoring slot" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) - # other CatchableErrors are handled "automatically" by the ErrorHandlingState + let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) + let requestedCollateral = request.ask.collateralPerSlot + var collateral: UInt256 - return some State(SaleFilled()) + if slotState == SlotState.Repair: + # When repairing the node gets "discount" on the collateral that it needs to + let repairRewardPercentage = (await market.repairRewardPercentage).u256 + collateral = + requestedCollateral - + ((requestedCollateral * repairRewardPercentage)).div(100.u256) + else: + collateral = requestedCollateral + + debug "Filling slot" + try: + await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) + except MarketError as e: + if e.msg.contains "Slot is not free": + debug "Slot is already filled, ignoring slot" + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + else: 
+ return some State(SaleErrored(error: e)) + # other CatchableErrors are handled "automatically" by the SaleState + + return some State(SaleFilled()) + except CancelledError as e: + trace "SaleFilling.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilling.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim index 151300d0..2aba69eb 100644 --- a/codex/sales/states/finished.nim +++ b/codex/sales/states/finished.nim @@ -1,16 +1,17 @@ import pkg/chronos import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./cancelled import ./failed +import ./errored logScope: topics = "marketplace sales finished" -type SaleFinished* = ref object of ErrorHandlingState +type SaleFinished* = ref object of SaleState returnedCollateral*: ?UInt256 method `$`*(state: SaleFinished): string = @@ -22,7 +23,9 @@ method onCancelled*(state: SaleFinished, request: StorageRequest): ?State = method onFailed*(state: SaleFinished, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFinished, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data @@ -32,5 +35,11 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex - if onCleanUp =? agent.onCleanUp: - await onCleanUp(returnedCollateral = state.returnedCollateral) + try: + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp(returnedCollateral = state.returnedCollateral) + except CancelledError as e: + trace "SaleFinished.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFinished.run in onCleanUp callback", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim index b915bff5..b07a201c 100644 --- a/codex/sales/states/ignored.nim +++ b/codex/sales/states/ignored.nim @@ -1,9 +1,10 @@ import pkg/chronos import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling +import ./errored logScope: topics = "marketplace sales ignored" @@ -11,17 +12,25 @@ logScope: # Ignored slots could mean there was no availability or that the slot could # not be reserved. -type SaleIgnored* = ref object of ErrorHandlingState +type SaleIgnored* = ref object of SaleState reprocessSlot*: bool # readd slot to queue with `seen` flag returnBytes*: bool # return unreleased bytes from Reservation to Availability method `$`*(state: SaleIgnored): string = "SaleIgnored" -method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleIgnored, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) - if onCleanUp =? agent.onCleanUp: - await onCleanUp( - reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes - ) + try: + if onCleanUp =?
agent.onCleanUp: + await onCleanUp( + reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes + ) + except CancelledError as e: + trace "SaleIgnored.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleIgnored.run in onCleanUp", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim index bc9ce6b6..57e8cc2c 100644 --- a/codex/sales/states/initialproving.nim +++ b/codex/sales/states/initialproving.nim @@ -1,9 +1,9 @@ import pkg/questionable/results import ../../clock import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./filling import ./cancelled import ./errored @@ -12,7 +12,7 @@ import ./failed logScope: topics = "marketplace sales initial-proving" -type SaleInitialProving* = ref object of ErrorHandlingState +type SaleInitialProving* = ref object of SaleState method `$`*(state: SaleInitialProving): string = "SaleInitialProving" @@ -25,9 +25,9 @@ method onFailed*(state: SaleInitialProving, request: StorageRequest): ?State = proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} = trace "Waiting until next period" - let period = periodicity.periodOf(clock.now().u256) - let periodEnd = periodicity.periodEnd(period).truncate(int64) - await clock.waitUntil(periodEnd + 1) + let period = periodicity.periodOf(clock.now().Timestamp) + let periodEnd = periodicity.periodEnd(period) + await clock.waitUntil((periodEnd + 1).toSecondsSince1970) proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} = let periodicity = await market.periodicity() @@ -36,7 +36,9 @@ proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.asyn while (await market.getPointer(slotId)) > (256 - downtime): await clock.waitUntilNextPeriod(periodicity) -method run*(state: SaleInitialProving, machine: Machine): 
Future[?State] {.async.} = +method run*( + state: SaleInitialProving, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let context = SalesAgent(machine).context let market = context.market @@ -48,16 +50,22 @@ method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async without onProve =? context.onProve: raiseAssert "onProve callback not set" - debug "Waiting for a proof challenge that is valid for the entire period" - let slot = Slot(request: request, slotIndex: data.slotIndex) - await waitForStableChallenge(market, clock, slot.id) + try: + debug "Waiting for a proof challenge that is valid for the entire period" + let slot = Slot(request: request, slotIndex: data.slotIndex) + await waitForStableChallenge(market, clock, slot.id) - debug "Generating initial proof", requestId = data.requestId - let challenge = await context.market.getChallenge(slot.id) - without proof =? (await onProve(slot, challenge)), err: - error "Failed to generate initial proof", error = err.msg - return some State(SaleErrored(error: err)) + debug "Generating initial proof", requestId = data.requestId + let challenge = await context.market.getChallenge(slot.id) + without proof =? 
(await onProve(slot, challenge)), err: + error "Failed to generate initial proof", error = err.msg + return some State(SaleErrored(error: err)) - debug "Finished proof calculation", requestId = data.requestId + debug "Finished proof calculation", requestId = data.requestId - return some State(SaleFilling(proof: proof)) + return some State(SaleFilling(proof: proof)) + except CancelledError as e: + trace "SaleInitialProving.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleInitialProving.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/payout.nim b/codex/sales/states/payout.nim index 9ce36613..e808307d 100644 --- a/codex/sales/states/payout.nim +++ b/codex/sales/states/payout.nim @@ -1,16 +1,17 @@ import ../../logutils import ../../market +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./cancelled import ./failed import ./finished +import ./errored logScope: topics = "marketplace sales payout" -type SalePayout* = ref object of ErrorHandlingState +type SalePayout* = ref object of SaleState method `$`*(state: SalePayout): string = "SalePayout" @@ -21,17 +22,25 @@ method onCancelled*(state: SalePayout, request: StorageRequest): ?State = method onFailed*(state: SalePayout, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SalePayout, machine: Machine): Future[?State] {.async.} = +method run*( + state: SalePayout, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market without request =?
data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting finished slot's reward", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Collecting finished slot's reward", + requestId = data.requestId, slotIndex = data.slotIndex + let currentCollateral = await market.currentCollateral(slot.id) + await market.freeSlot(slot.id) - return some State(SaleFinished(returnedCollateral: some currentCollateral)) + return some State(SaleFinished(returnedCollateral: some currentCollateral)) + except CancelledError as e: + trace "SalePayout.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SalePayout.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index bdde1249..443aee0b 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -4,9 +4,9 @@ import pkg/metrics import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./filled @@ -18,7 +18,7 @@ declareCounter( codex_reservations_availability_mismatch, "codex reservations availability_mismatch" ) -type SalePreparing* = ref object of ErrorHandlingState +type SalePreparing* = ref object of SaleState logScope: topics = "marketplace sales preparing" @@ -33,66 +33,74 @@ method onFailed*(state: SalePreparing, request: StorageRequest): ?State = return some State(SaleFailed()) method onSlotFilled*( - state: SalePreparing, requestId: RequestId, slotIndex: UInt256 + state: SalePreparing, requestId: RequestId, slotIndex: uint64 ): ?State = return some State(SaleFilled()) -method run*(state:
SalePreparing, machine: Machine): Future[?State] {.async.} = +method run*( + state: SalePreparing, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context let market = context.market let reservations = context.reservations - await agent.retrieveRequest() - await agent.subscribe() + try: + await agent.retrieveRequest() + await agent.subscribe() - without request =? data.request: - raiseAssert "no sale request" + without request =? data.request: + raiseAssert "no sale request" - let slotId = slotId(data.requestId, data.slotIndex) - let state = await market.slotState(slotId) - if state != SlotState.Free and state != SlotState.Repair: - return some State(SaleIgnored(reprocessSlot: false, returnBytes: false)) + let slotId = slotId(data.requestId, data.slotIndex) + let state = await market.slotState(slotId) + if state != SlotState.Free and state != SlotState.Repair: + return some State(SaleIgnored(reprocessSlot: false, returnBytes: false)) - # TODO: Once implemented, check to ensure the host is allowed to fill the slot, - # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) + # TODO: Once implemented, check to ensure the host is allowed to fill the slot, + # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) - logScope: - slotIndex = data.slotIndex - slotSize = request.ask.slotSize - duration = request.ask.duration - pricePerBytePerSecond = request.ask.pricePerBytePerSecond - collateralPerByte = request.ask.collateralPerByte + logScope: + slotIndex = data.slotIndex + slotSize = request.ask.slotSize + duration = request.ask.duration + pricePerBytePerSecond = request.ask.pricePerBytePerSecond + collateralPerByte = request.ask.collateralPerByte - without availability =? 
- await reservations.findAvailability( - request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, - request.ask.collateralPerByte, - ): - debug "No availability found for request, ignoring" + without availability =? + await reservations.findAvailability( + request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, + request.ask.collateralPerByte, + ): + debug "No availability found for request, ignoring" - return some State(SaleIgnored(reprocessSlot: true)) - - info "Availability found for request, creating reservation" - - without reservation =? - await reservations.createReservation( - availability.id, request.ask.slotSize, request.id, data.slotIndex, - request.ask.collateralPerByte, - ), error: - trace "Creation of reservation failed" - # Race condition: - # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. - # Should createReservation fail because there's no space, we proceed to SaleIgnored. - if error of BytesOutOfBoundsError: - # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it - codex_reservations_availability_mismatch.inc() return some State(SaleIgnored(reprocessSlot: true)) - return some State(SaleErrored(error: error)) + info "Availability found for request, creating reservation" - trace "Reservation created succesfully" + without reservation =? + await reservations.createReservation( + availability.id, request.ask.slotSize, request.id, data.slotIndex, + request.ask.collateralPerByte, + ), error: + trace "Creation of reservation failed" + # Race condition: + # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. + # Should createReservation fail because there's no space, we proceed to SaleIgnored. 
+ if error of BytesOutOfBoundsError: + # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it + codex_reservations_availability_mismatch.inc() + return some State(SaleIgnored(reprocessSlot: true)) - data.reservation = some reservation - return some State(SaleSlotReserving()) + return some State(SaleErrored(error: error)) + + trace "Reservation created successfully" + + data.reservation = some reservation + return some State(SaleSlotReserving()) + except CancelledError as e: + trace "SalePreparing.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SalePreparing.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim index 0ee2ed60..690e9136 100644 --- a/codex/sales/states/proving.nim +++ b/codex/sales/states/proving.nim @@ -6,7 +6,6 @@ import ../../utils/exceptions import ../statemachine import ../salesagent import ../salescontext -import ./errorhandling import ./cancelled import ./failed import ./errored @@ -18,7 +17,7 @@ logScope: type SlotFreedError* = object of CatchableError SlotNotFilledError* = object of CatchableError - SaleProving* = ref object of ErrorHandlingState + SaleProving* = ref object of SaleState loop: Future[void] method prove*( @@ -47,7 +46,7 @@ proc proveLoop( market: Market, clock: Clock, request: StorageRequest, - slotIndex: UInt256, + slotIndex: uint64, onProve: OnProve, ) {.async.} = let slot = Slot(request: request, slotIndex: slotIndex) @@ -61,12 +60,12 @@ proc proveLoop( proc getCurrentPeriod(): Future[Period] {.async.} = let periodicity = await market.periodicity() - return periodicity.periodOf(clock.now().u256) + return periodicity.periodOf(clock.now().Timestamp) proc waitUntilPeriod(period: Period) {.async.} = let periodicity = await market.periodicity() # Ensure that we're past the period boundary by waiting an additional second - await 
clock.waitUntil(periodicity.periodStart(period).truncate(int64) + 1) + await clock.waitUntil((periodicity.periodStart(period) + 1).toSecondsSince1970) while true: let currentPeriod = await getCurrentPeriod() @@ -113,7 +112,9 @@ method onFailed*(state: SaleProving, request: StorageRequest): ?State = # state change return some State(SaleFailed()) -method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleProving, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let context = SalesAgent(machine).context @@ -129,27 +130,37 @@ method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} = without clock =? context.clock: raiseAssert("clock not set") - debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex try: - let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) - state.loop = loop - await loop - except CancelledError: - discard + debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex + try: + let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) + state.loop = loop + await loop + except CancelledError as e: + trace "proving loop cancelled" + discard + except CatchableError as e: + error "Proving failed", + msg = e.msg, typ = $(type e), stack = e.getStackTrace(), error = e.msgDetail + return some State(SaleErrored(error: e)) + finally: + # Cleanup of the proving loop + debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex + + if not state.loop.isNil: + if not state.loop.finished: + try: + await state.loop.cancelAndWait() + except CancelledError: + discard + except CatchableError as e: + error "Error during cancellation of proving loop", msg = e.msg + + state.loop = nil + + return some State(SalePayout()) + except CancelledError as e: + trace "SaleProving.run onCleanUp was cancelled", error = e.msgDetail except CatchableError as e: - error 
"Proving failed", msg = e.msg + error "Error during SaleProving.run", error = e.msgDetail return some State(SaleErrored(error: e)) - finally: - # Cleanup of the proving loop - debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex - - if not state.loop.isNil: - if not state.loop.finished: - try: - await state.loop.cancelAndWait() - except CatchableError as e: - error "Error during cancellation of proving loop", msg = e.msg - - state.loop = nil - - return some State(SalePayout()) diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index e60169bc..b8a3e9ce 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -4,12 +4,14 @@ when codex_enable_proof_failures: import pkg/stint import pkg/ethers + import ../../contracts/marketplace import ../../contracts/requests import ../../logutils import ../../market import ../../utils/exceptions import ../salescontext import ./proving + import ./errored logScope: topics = "marketplace sales simulated-proving" @@ -18,7 +20,7 @@ when codex_enable_proof_failures: failEveryNProofs*: int proofCount: int - proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = + proc onSubmitProofError(error: ref CatchableError, period: Period, slotId: SlotId) = error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail method prove*( @@ -29,22 +31,27 @@ when codex_enable_proof_failures: market: Market, currentPeriod: Period, ) {.async.} = - trace "Processing proving in simulated mode" - state.proofCount += 1 - if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: - state.proofCount = 0 + try: + trace "Processing proving in simulated mode" + state.proofCount += 1 + if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: + state.proofCount = 0 - try: - warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id - await 
market.submitProof(slot.id, Groth16Proof.default) - except MarketError as e: - if not e.msg.contains("Invalid proof"): + try: + warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id + await market.submitProof(slot.id, Groth16Proof.default) + except Proofs_InvalidProof as e: + discard # expected + except CancelledError as error: + raise error + except CatchableError as e: onSubmitProofError(e, currentPeriod, slot.id) - except CancelledError as error: - raise error - except CatchableError as e: - onSubmitProofError(e, currentPeriod, slot.id) - else: - await procCall SaleProving(state).prove( - slot, challenge, onProve, market, currentPeriod - ) + else: + await procCall SaleProving(state).prove( + slot, challenge, onProve, market, currentPeriod + ) + except CancelledError as e: + trace "Submitting INVALID proof cancelled", error = e.msgDetail + raise e + except CatchableError as e: + error "Submitting INVALID proof failed", error = e.msgDetail diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index 38b7fa76..a67c51a0 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -3,16 +3,16 @@ import pkg/metrics import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./ignored import ./downloading import ./errored -type SaleSlotReserving* = ref object of ErrorHandlingState +type SaleSlotReserving* = ref object of SaleState logScope: topics = "marketplace sales reserving" @@ -26,7 +26,9 @@ method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State = method onFailed*(state: SaleSlotReserving, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleSlotReserving, machine: Machine +): Future[?State] {.async: (raises: 
[]).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -36,23 +38,29 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async. requestId = data.requestId slotIndex = data.slotIndex - let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex) - if canReserve: - try: - trace "Reserving slot" - await market.reserveSlot(data.requestId, data.slotIndex) - except MarketError as e: - if e.msg.contains "SlotReservations_ReservationNotAllowed": - debug "Slot cannot be reserved, ignoring", error = e.msg - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) - # other CatchableErrors are handled "automatically" by the ErrorHandlingState + try: + let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex) + if canReserve: + try: + trace "Reserving slot" + await market.reserveSlot(data.requestId, data.slotIndex) + except MarketError as e: + if e.msg.contains "SlotReservations_ReservationNotAllowed": + debug "Slot cannot be reserved, ignoring", error = e.msg + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + else: + return some State(SaleErrored(error: e)) + # other CatchableErrors are handled "automatically" by the SaleState - trace "Slot successfully reserved" - return some State(SaleDownloading()) - else: - # do not re-add this slot to the queue, and return bytes from Reservation to - # the Availability - debug "Slot cannot be reserved, ignoring" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + trace "Slot successfully reserved" + return some State(SaleDownloading()) + else: + # do not re-add this slot to the queue, and return bytes from Reservation to + # the Availability + debug "Slot cannot be reserved, ignoring" + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + except CancelledError as e: + trace "SaleSlotReserving.run was 
cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleSlotReserving.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/unknown.nim b/codex/sales/states/unknown.nim index 3034129a..d182d744 100644 --- a/codex/sales/states/unknown.nim +++ b/codex/sales/states/unknown.nim @@ -1,4 +1,5 @@ import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent import ./filled @@ -26,34 +27,42 @@ method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State = method onFailed*(state: SaleUnknown, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleUnknown, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let market = agent.context.market - await agent.retrieveRequest() - await agent.subscribe() + try: + await agent.retrieveRequest() + await agent.subscribe() - let slotId = slotId(data.requestId, data.slotIndex) - let slotState = await market.slotState(slotId) + let slotId = slotId(data.requestId, data.slotIndex) + let slotState = await market.slotState(slotId) - case slotState - of SlotState.Free: - let error = - newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") - return some State(SaleErrored(error: error)) - of SlotState.Filled: - return some State(SaleFilled()) - of SlotState.Finished: - return some State(SalePayout()) - of SlotState.Paid: - return some State(SaleFinished()) - of SlotState.Failed: - return some State(SaleFailed()) - of SlotState.Cancelled: - return some State(SaleCancelled()) - of SlotState.Repair: - let error = newException( - SlotFreedError, "Slot was forcible freed and host was removed from its hosting" - ) - return some State(SaleErrored(error: error)) + case slotState + of SlotState.Free: + let error = + 
newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") + return some State(SaleErrored(error: error)) + of SlotState.Filled: + return some State(SaleFilled()) + of SlotState.Finished: + return some State(SalePayout()) + of SlotState.Paid: + return some State(SaleFinished()) + of SlotState.Failed: + return some State(SaleFailed()) + of SlotState.Cancelled: + return some State(SaleCancelled()) + of SlotState.Repair: + let error = newException( + SlotFreedError, "Slot was forcible freed and host was removed from its hosting" + ) + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleUnknown.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleUnknown.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 30332f1c..74597ff1 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -189,7 +189,7 @@ proc getCellHashes*[T, H]( blkIdx = blkIdx pos = i - trace "Getting block CID for tree at index" + trace "Getting block CID for tree at index", index = blkIdx without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root, err: error "Failed to get block CID for tree at index", err = err.msg diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index dcacbd62..125741e1 100644 --- a/codex/stores/repostore/operations.nim +++ b/codex/stores/repostore/operations.nim @@ -57,6 +57,17 @@ proc putLeafMetadata*( (md.some, res), ) +proc delLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = + without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: + return failure(err) + + if err =? 
(await self.metaDs.delete(key)).errorOption: + return failure(err) + + success() + proc getLeafMetadata*( self: RepoStore, treeCid: Cid, index: Natural ): Future[?!LeafMetadata] {.async.} = @@ -205,9 +216,6 @@ proc storeBlock*( proc tryDeleteBlock*( self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low ): Future[?!DeleteResult] {.async.} = - if cid.isEmpty: - return success(DeleteResult(kind: InUse)) - without metaKey =? createBlockExpirationMetadataKey(cid), err: return failure(err) diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index 2b14d6b7..d7305107 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -186,13 +186,13 @@ method putBlock*( return success() -method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = - ## Delete a block from the blockstore when block refCount is 0 or block is expired - ## - +proc delBlockInternal(self: RepoStore, cid: Cid): Future[?!DeleteResultKind] {.async.} = logScope: cid = cid + if cid.isEmpty: + return success(Deleted) + trace "Attempting to delete a block" without res =? await self.tryDeleteBlock(cid, self.clock.now()), err: @@ -205,12 +205,28 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = if err =? (await self.updateQuotaUsage(minusUsed = res.released)).errorOption: return failure(err) - elif res.kind == InUse: - trace "Block in use, refCount > 0 and not expired" - else: - trace "Block not found in store" - return success() + success(res.kind) + +method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = + ## Delete a block from the blockstore when block refCount is 0 or block is expired + ## + + logScope: + cid = cid + + without outcome =? 
await self.delBlockInternal(cid), err: + return failure(err) + + case outcome + of InUse: + failure("Directly deleting a block that is part of a dataset is not allowed.") + of NotFound: + trace "Block not found, ignoring" + success() + of Deleted: + trace "Block already deleted" + success() method delBlock*( self: RepoStore, treeCid: Cid, index: Natural @@ -221,12 +237,19 @@ method delBlock*( else: return failure(err) + if err =? (await self.delLeafMetadata(treeCid, index)).errorOption: + error "Failed to delete leaf metadata, block will remain on disk.", err = err.msg + return failure(err) + if err =? (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: if not (err of BlockNotFoundError): return failure(err) - await self.delBlock(leafMd.blkCid) # safe delete, only if refCount == 0 + without _ =? await self.delBlockInternal(leafMd.blkCid), err: + return failure(err) + + success() method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = ## Check if the block exists in the blockstore @@ -295,6 +318,18 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query = let queryKey = ?createBlockExpirationMetadataQueryKey() success Query.init(queryKey, offset = offset, limit = maxNumber) +proc blockRefCount*(self: RepoStore, cid: Cid): Future[?!Natural] {.async.} = + ## Returns the reference count for a block. If the count is zero; + ## this means the block is eligible for garbage collection. + ## + without key =? createBlockExpirationMetadataKey(cid), err: + return failure(err) + + without md =? 
await get[BlockMetadata](self.metaDs, key), err: + return failure(err) + + return success(md.refCount) + method getBlockExpirations*( self: RepoStore, maxNumber: int, offset: int ): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index 85b0e354..a68e2ea7 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -110,7 +110,7 @@ method readOnce*( raise newLPStreamReadError(error) trace "Reading bytes from store stream", - manifestCid = self.manifest.cid.get(), + manifestCid = self.manifest.treeCid, numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, diff --git a/codex/utils/arrayutils.nim b/codex/utils/arrayutils.nim new file mode 100644 index 00000000..c398921f --- /dev/null +++ b/codex/utils/arrayutils.nim @@ -0,0 +1,25 @@ +import std/sequtils + +proc createDoubleArray*( + outerLen, innerLen: int +): ptr UncheckedArray[ptr UncheckedArray[byte]] = + # Allocate outer array + result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](allocShared0( + sizeof(ptr UncheckedArray[byte]) * outerLen + )) + + # Allocate each inner array + for i in 0 ..< outerLen: + result[i] = cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * innerLen)) + +proc freeDoubleArray*( + arr: ptr UncheckedArray[ptr UncheckedArray[byte]], outerLen: int +) = + # Free each inner array + for i in 0 ..< outerLen: + if not arr[i].isNil: + deallocShared(arr[i]) + + # Free outer array + if not arr.isNil: + deallocShared(arr) diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index 1b0dd8bc..bc37c462 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -9,7 +9,7 @@ import std/sequtils import pkg/chronos -import pkg/stew/results +import pkg/results # Based on chronos AsyncHeapQueue and std/heapqueue diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 572ae246..2d87ebc1 100644 --- 
a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -2,6 +2,7 @@ import pkg/questionable import pkg/chronos import ../logutils import ./trackedfutures +import ./exceptions {.push raises: [].} @@ -46,24 +47,14 @@ proc schedule*(machine: Machine, event: Event) = except AsyncQueueFullError: raiseAssert "unlimited queue is full?!" -method run*(state: State, machine: Machine): Future[?State] {.base, async.} = +method run*( + state: State, machine: Machine +): Future[?State] {.base, async: (raises: []).} = discard -method onError*(state: State, error: ref CatchableError): ?State {.base.} = - raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error) - -proc onError(machine: Machine, error: ref CatchableError): Event = - return proc(state: State): ?State = - state.onError(error) - proc run(machine: Machine, state: State) {.async: (raises: []).} = - try: - if next =? await state.run(machine): - machine.schedule(Event.transition(state, next)) - except CancelledError: - discard # do not propagate - except CatchableError as e: - machine.schedule(machine.onError(e)) + if next =? 
await state.run(machine): + machine.schedule(Event.transition(state, next)) proc scheduler(machine: Machine) {.async: (raises: []).} = var running: Future[void].Raising([]) diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 8a641e95..43909588 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,7 +1,6 @@ {.push raises: [].} -import - std/[tables, hashes], stew/results, stew/shims/net as stewNet, chronos, chronicles +import std/[tables, hashes], pkg/results, stew/shims/net as stewNet, chronos, chronicles import pkg/libp2p diff --git a/codex/validation.nim b/codex/validation.nim index 6e3135e4..18a444a6 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -2,6 +2,7 @@ import std/sets import std/sequtils import pkg/chronos import pkg/questionable/results +import pkg/stew/endians2 import ./validationconfig import ./market @@ -19,11 +20,9 @@ type Validation* = ref object subscriptions: seq[Subscription] running: Future[void] periodicity: Periodicity - proofTimeout: UInt256 + proofTimeout: uint64 config: ValidationConfig -const MaxStorageRequestDuration = 30.days - logScope: topics = "codex validator" @@ -35,18 +34,19 @@ proc new*( proc slots*(validation: Validation): seq[SlotId] = validation.slots.toSeq -proc getCurrentPeriod(validation: Validation): UInt256 = - return validation.periodicity.periodOf(validation.clock.now().u256) +proc getCurrentPeriod(validation: Validation): Period = + return validation.periodicity.periodOf(validation.clock.now().Timestamp) proc waitUntilNextPeriod(validation: Validation) {.async.} = let period = validation.getCurrentPeriod() let periodEnd = validation.periodicity.periodEnd(period) trace "Waiting until next period", currentPeriod = period - await validation.clock.waitUntil(periodEnd.truncate(int64) + 1) + await validation.clock.waitUntil((periodEnd + 1).toSecondsSince1970) func groupIndexForSlotId*(slotId: SlotId, validationGroups: ValidationGroups): uint16 = - let slotIdUInt256 = 
UInt256.fromBytesBE(slotId.toArray) - (slotIdUInt256 mod validationGroups.u256).truncate(uint16) + let a = slotId.toArray + let slotIdInt64 = uint64.fromBytesBE(a) + (slotIdInt64 mod uint64(validationGroups)).uint16 func maxSlotsConstraintRespected(validation: Validation): bool = validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots @@ -57,7 +57,7 @@ func shouldValidateSlot(validation: Validation, slotId: SlotId): bool = groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex proc subscribeSlotFilled(validation: Validation) {.async.} = - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = if not validation.maxSlotsConstraintRespected: return let slotId = slotId(requestId, slotIndex) @@ -115,14 +115,13 @@ proc run(validation: Validation) {.async: (raises: []).} = except CatchableError as e: error "Validation failed", msg = e.msg -proc epochForDurationBackFromNow( - validation: Validation, duration: Duration -): SecondsSince1970 = - return validation.clock.now - duration.secs +proc findEpoch(validation: Validation, secondsAgo: uint64): SecondsSince1970 = + return validation.clock.now - secondsAgo.int64 proc restoreHistoricalState(validation: Validation) {.async.} = trace "Restoring historical state..." - let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration) + let requestDurationLimit = await validation.market.requestDurationLimit + let startTimeEpoch = validation.findEpoch(secondsAgo = requestDurationLimit) let slotFilledEvents = await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch) for event in slotFilledEvents: diff --git a/config.nims b/config.nims index 6a4767ad..05a31fff 100644 --- a/config.nims +++ b/config.nims @@ -1,21 +1,24 @@ - include "build.nims" import std/os const currentDir = currentSourcePath()[0 .. 
^(len("config.nims") + 1)] when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and - # BEWARE - # In Nim 1.6, config files are evaluated with a working directory - # matching where the Nim command was invocated. This means that we - # must do all file existence checks with full absolute paths: - system.fileExists(currentDir & "nimbus-build-system.paths"): + # BEWARE + # In Nim 1.6, config files are evaluated with a working directory + # matching where the Nim command was invocated. This means that we + # must do all file existence checks with full absolute paths: + system.fileExists(currentDir & "nimbus-build-system.paths"): include "nimbus-build-system.paths" when defined(release): - switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName")) + switch( + "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName") + ) else: - switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName")) + switch( + "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName") + ) when defined(limitStackUsage): # This limits stack usage of each individual function to 1MB - the option is @@ -34,7 +37,8 @@ when defined(windows): # increase stack size switch("passL", "-Wl,--stack,8388608") # https://github.com/nim-lang/Nim/issues/4057 - --tlsEmulation:off + --tlsEmulation: + off if defined(i386): # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM switch("passL", "-Wl,--large-address-aware") @@ -63,30 +67,47 @@ else: # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) switch("passC", "-mno-avx512vl") ---tlsEmulation:off ---threads:on ---opt:speed ---excessiveStackTrace:on +--tlsEmulation: + off +--threads: + on +--opt: + speed +--excessiveStackTrace: + on # enable metric collection ---define:metrics +--define: + metrics # for heap-usage-by-instance-type metrics and object base-type strings 
---define:nimTypeNames ---styleCheck:usages ---styleCheck:error ---maxLoopIterationsVM:1000000000 ---fieldChecks:on ---warningAsError:"ProveField:on" +--define: + nimTypeNames +--styleCheck: + usages +--styleCheck: + error +--maxLoopIterationsVM: + 1000000000 +--fieldChecks: + on +--warningAsError: + "ProveField:on" when (NimMajor, NimMinor) >= (1, 4): - --warning:"ObservableStores:off" - --warning:"LockLevel:off" - --hint:"XCannotRaiseY:off" + --warning: + "ObservableStores:off" + --warning: + "LockLevel:off" + --hint: + "XCannotRaiseY:off" when (NimMajor, NimMinor) >= (1, 6): - --warning:"DotLikeOps:off" + --warning: + "DotLikeOps:off" when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11): - --warning:"BareExcept:off" + --warning: + "BareExcept:off" when (NimMajor, NimMinor) >= (2, 0): - --mm:refc + --mm: + refc switch("define", "withoutPCRE") @@ -94,10 +115,12 @@ switch("define", "withoutPCRE") # "--debugger:native" build. It can be increased with `ulimit -n 1024`. if not defined(macosx): # add debugging symbols and original files and line numbers - --debugger:native + --debugger: + native if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace): # light-weight stack traces using libbacktrace and libunwind - --define:nimStackTraceOverride + --define: + nimStackTraceOverride switch("import", "libbacktrace") # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" diff --git a/openapi.yaml b/openapi.yaml index 9d401e8f..70da398b 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -371,12 +371,6 @@ components: nullable: true description: "The original mimetype of the uploaded content (optional)" example: image/png - uploadedAt: - type: integer - format: int64 - nullable: true - description: "The UTC upload timestamp in seconds" - example: 1729244192 Space: type: object diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim 
b/tests/codex/blockexchange/discovery/testdiscovery.nim index 88331c3f..97a455e1 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -96,9 +96,9 @@ asyncchecksuite "Block Advertising and Discovery": await engine.stop() - test "Should advertise both manifests and trees": + test "Should advertise trees": let - cids = @[manifest.cid.tryGet, manifest.treeCid] + cids = @[manifest.treeCid] advertised = initTable.collect: for cid in cids: {cid: newFuture[void]()} diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 69a85db8..ed1dd52a 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -8,6 +8,7 @@ import pkg/codex/stores import pkg/codex/blocktype as bt import pkg/codex/sales import pkg/codex/merkletree +import pkg/codex/manifest import ../examples export examples @@ -36,8 +37,8 @@ proc example*(_: type SignedState): SignedState = proc example*(_: type Pricing): Pricing = Pricing(address: EthAddress.example, price: uint32.rand.u256) -proc example*(_: type bt.Block): bt.Block = - let length = rand(4096) +proc example*(_: type bt.Block, size: int = 4096): bt.Block = + let length = rand(size) let bytes = newSeqWith(length, rand(uint8)) bt.Block.new(bytes).tryGet() @@ -51,6 +52,15 @@ proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx = proc example*(_: type Cid): Cid = bt.Block.example.cid +proc example*(_: type Manifest): Manifest = + Manifest.new( + treeCid = Cid.example, + blockSize = 256.NBytes, + datasetSize = 4096.NBytes, + filename = "example.txt".some, + mimetype = "text/plain".some, + ) + proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = let bytes = newSeqWith(256, rand(uint8)) MultiHash.digest($mcodec, bytes).tryGet() @@ -58,19 +68,19 @@ proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = proc example*( _: type Availability, collateralPerByte = uint8.example.u256 ): Availability = - let totalSize = 
uint16.example.u256 + let totalSize = uint16.example.uint64 Availability.init( totalSize = totalSize, - freeSize = uint16.example.u256, - duration = uint16.example.u256, + freeSize = uint16.example.uint64, + duration = uint16.example.uint64, minPricePerBytePerSecond = uint8.example.u256, - totalCollateral = totalSize * collateralPerByte, + totalCollateral = totalSize.u256 * collateralPerByte, ) proc example*(_: type Reservation): Reservation = Reservation.init( availabilityId = AvailabilityId(array[32, byte].example), - size = uint16.example.u256, + size = uint16.example.uint64, slotId = SlotId.example, ) diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 6d7415d3..898dd16e 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ -85,30 +85,31 @@ proc makeWantList*( ) proc storeDataGetManifest*( - store: BlockStore, chunker: Chunker + store: BlockStore, blocks: seq[Block] ): Future[Manifest] {.async.} = - var cids = newSeq[Cid]() - - while (let chunk = await chunker.getBytes(); chunk.len > 0): - let blk = Block.new(chunk).tryGet() - cids.add(blk.cid) + for blk in blocks: (await store.putBlock(blk)).tryGet() let - tree = CodexTree.init(cids).tryGet() + (manifest, tree) = makeManifestAndTree(blocks).tryGet() treeCid = tree.rootCid.tryGet() - manifest = Manifest.new( - treeCid = treeCid, - blockSize = NBytes(chunker.chunkSize), - datasetSize = NBytes(chunker.offset), - ) for i in 0 ..< tree.leavesCount: let proof = tree.getProof(i).tryGet() - (await store.putCidAndProof(treeCid, i, cids[i], proof)).tryGet() + (await store.putCidAndProof(treeCid, i, blocks[i].cid, proof)).tryGet() return manifest +proc storeDataGetManifest*( + store: BlockStore, chunker: Chunker +): Future[Manifest] {.async.} = + var blocks = newSeq[Block]() + + while (let chunk = await chunker.getBytes(); chunk.len > 0): + blocks.add(Block.new(chunk).tryGet()) + + return await storeDataGetManifest(store, blocks) + proc makeRandomBlocks*( datasetSize: int, blockSize: 
NBytes ): Future[seq[Block]] {.async.} = diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index bb0eaaa2..48b20f28 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -57,7 +57,7 @@ type MockSlot* = object requestId*: RequestId host*: Address - slotIndex*: UInt256 + slotIndex*: uint64 proof*: Groth16Proof timestamp: ?SecondsSince1970 collateral*: UInt256 @@ -84,7 +84,7 @@ type SlotFilledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId - slotIndex: ?UInt256 + slotIndex: ?uint64 callback: OnSlotFilled SlotFreedSubscription* = ref object of Subscription @@ -122,12 +122,17 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = collateral: CollateralConfig( repairRewardPercentage: 10, maxNumberOfSlashes: 5, - slashCriterion: 3, slashPercentage: 10, + validatorRewardPercentage: 20, ), proofs: ProofConfig( - period: 10.u256, timeout: 5.u256, downtime: 64.uint8, downtimeProduct: 67.uint8 + period: 10.Period, + timeout: 5.uint64, + downtime: 64.uint8, + downtimeProduct: 67.uint8, ), + reservations: SlotReservationsConfig(maxReservations: 3), + requestDurationLimit: (60 * 60 * 24 * 30).uint64, ) MockMarket( signer: Address.example, config: config, canReserveSlot: true, clock: clock @@ -139,9 +144,12 @@ method getSigner*(market: MockMarket): Future[Address] {.async.} = method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = return Periodicity(seconds: mock.config.proofs.period) -method proofTimeout*(market: MockMarket): Future[UInt256] {.async.} = +method proofTimeout*(market: MockMarket): Future[uint64] {.async.} = return market.config.proofs.timeout +method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} = + return market.config.requestDurationLimit + method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = return market.config.proofs.downtime @@ -171,9 +179,9 @@ method getRequest*( return some 
request return none StorageRequest -method getActiveSlot*(market: MockMarket, slotId: SlotId): Future[?Slot] {.async.} = +method getActiveSlot*(market: MockMarket, id: SlotId): Future[?Slot] {.async.} = for slot in market.filled: - if slotId(slot.requestId, slot.slotIndex) == slotId and + if slotId(slot.requestId, slot.slotIndex) == id and request =? await market.getRequest(slot.requestId): return some Slot(request: request, slotIndex: slot.slotIndex) return none Slot @@ -199,7 +207,7 @@ method requestExpiresAt*( return market.requestExpiry[id] method getHost*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.async.} = for slot in market.filled: if slot.requestId == requestId and slot.slotIndex == slotIndex: @@ -214,7 +222,7 @@ method currentCollateral*( return slot.collateral return 0.u256 -proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = +proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: uint64) = var subscriptions = market.subscriptions.onSlotFilled for subscription in subscriptions: let requestMatches = @@ -224,13 +232,13 @@ proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt25 if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = +proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: uint64) = var subscriptions = market.subscriptions.onSlotFreed for subscription in subscriptions: subscription.callback(requestId, slotIndex) proc emitSlotReservationsFull*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ) = var subscriptions = market.subscriptions.onSlotReservationsFull for subscription in subscriptions: @@ -257,7 +265,7 @@ proc emitRequestFailed*(market: MockMarket, requestId: 
RequestId) = proc fillSlot*( market: MockMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, host: Address, collateral = 0.u256, @@ -277,7 +285,7 @@ proc fillSlot*( method fillSlot*( market: MockMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, ) {.async.} = @@ -341,13 +349,13 @@ method canProofBeMarkedAsMissing*( return market.canBeMarkedAsMissing.contains(id) method reserveSlot*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = if error =? market.reserveSlotThrowError: raise error method canReserveSlot*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = return market.canReserveSlot @@ -390,7 +398,7 @@ method subscribeSlotFilled*( return subscription method subscribeSlotFilled*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled + market: MockMarket, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled ): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription( market: market, diff --git a/tests/codex/helpers/mockreservations.nim b/tests/codex/helpers/mockreservations.nim index 060790a8..1bc76a09 100644 --- a/tests/codex/helpers/mockreservations.nim +++ b/tests/codex/helpers/mockreservations.nim @@ -24,9 +24,9 @@ proc setCreateReservationThrowError*( method createReservation*( self: MockReservations, availabilityId: AvailabilityId, - slotSize: UInt256, + slotSize: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, collateralPerByte: UInt256, ): Future[?!Reservation] {.async.} = if self.createReservationThrowBytesOutOfBoundsError: diff --git a/tests/codex/helpers/mocksalesagent.nim b/tests/codex/helpers/mocksalesagent.nim index 8374ae1d..d5de265a 100644 --- 
a/tests/codex/helpers/mocksalesagent.nim +++ b/tests/codex/helpers/mocksalesagent.nim @@ -12,6 +12,6 @@ method onFailed*(agent: SalesAgent, requestId: RequestId) = failedCalled = true method onSlotFilled*( - agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 + agent: SalesAgent, requestId: RequestId, slotIndex: uint64 ) {.base.} = slotFilledCalled = true diff --git a/tests/codex/helpers/mockslotqueueitem.nim b/tests/codex/helpers/mockslotqueueitem.nim index bc0c1047..7a1505ec 100644 --- a/tests/codex/helpers/mockslotqueueitem.nim +++ b/tests/codex/helpers/mockslotqueueitem.nim @@ -4,11 +4,11 @@ import pkg/codex/sales/slotqueue type MockSlotQueueItem* = object requestId*: RequestId slotIndex*: uint16 - slotSize*: UInt256 - duration*: UInt256 + slotSize*: uint64 + duration*: uint64 pricePerBytePerSecond*: UInt256 collateralPerByte*: UInt256 - expiry*: UInt256 + expiry*: uint64 seen*: bool proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem = diff --git a/tests/codex/node/helpers.nim b/tests/codex/node/helpers.nim index 0d72b06b..2d1a87dc 100644 --- a/tests/codex/node/helpers.nim +++ b/tests/codex/node/helpers.nim @@ -6,6 +6,7 @@ import pkg/chronos import pkg/codex/codextypes import pkg/codex/chunker import pkg/codex/stores +import pkg/taskpools import ../../asynctest @@ -118,6 +119,7 @@ template setupAndTearDown*() {.dirty.} = engine = engine, prover = Prover.none, discovery = blockDiscovery, + taskpool = Taskpool.new(), ) teardown: diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index cce6d5bd..11f4f273 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -75,10 +75,9 @@ asyncchecksuite "Test Node - Host contracts": let manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new) manifestCid 
= manifestBlock.cid - manifestCidStr = $(manifestCid) (await localStore.putBlock(manifestBlock)).tryGet() @@ -99,7 +98,7 @@ asyncchecksuite "Test Node - Host contracts": expectedExpiry: SecondsSince1970 = clock.now + DefaultBlockTtl.seconds + 11123 expiryUpdateCallback = !sales.onExpiryUpdate - (await expiryUpdateCallback(manifestCidStr, expectedExpiry)).tryGet() + (await expiryUpdateCallback(manifestCid, expectedExpiry)).tryGet() for index in 0 ..< manifest.blocksCount: let @@ -116,8 +115,9 @@ asyncchecksuite "Test Node - Host contracts": test "onStore callback": let onStore = !sales.onStore var request = StorageRequest.example - request.content.cid = $verifiableBlock.cid - request.expiry = (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.u256 + request.content.cid = verifiableBlock.cid + request.expiry = + (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.uint64 var fetchedBytes: uint = 0 let onBlocks = proc(blocks: seq[bt.Block]): Future[?!void] {.async.} = @@ -125,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts": fetchedBytes += blk.data.len.uint return success() - (await onStore(request, 1.u256, onBlocks)).tryGet() + (await onStore(request, 1.uint64, onBlocks)).tryGet() check fetchedBytes == 12 * DefaultBlockSize.uint let indexer = verifiable.protectedStrategy.init( diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index e4a9d1f4..0700203d 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -12,6 +12,7 @@ import pkg/questionable/results import pkg/stint import pkg/poseidon2 import pkg/poseidon2/io +import pkg/taskpools import pkg/nitro import pkg/codexdht/discv5/protocol as discv5 @@ -37,6 +38,7 @@ import ../examples import ../helpers import ../helpers/mockmarket import ../helpers/mockclock +import ../slots/helpers import ./helpers @@ -66,7 +68,7 @@ asyncchecksuite "Test Node - Basic": # https://github.com/codex-storage/nim-codex/issues/699 let cstore = 
CountingStore.new(engine, localStore) - node = CodexNodeRef.new(switch, cstore, engine, blockDiscovery) + node = CodexNodeRef.new(switch, cstore, engine, blockDiscovery, Taskpool.new()) missingCid = Cid.init("zDvZRwzmCvtiyubW9AecnxgLnXK8GrBvpQJBDzToxmzDN6Nrc2CZ").get() @@ -137,7 +139,8 @@ asyncchecksuite "Test Node - Basic": test "Setup purchase request": let - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + erasure = + Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new()) manifest = await storeDataGetManifest(localStore, chunker) manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() @@ -154,15 +157,40 @@ asyncchecksuite "Test Node - Basic": cid = manifestBlock.cid, nodes = 5, tolerance = 2, - duration = 100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 200.u256, + expiry = 200.uint64, collateralPerByte = 1.u256, ) ).tryGet check: (await verifiableBlock.cid in localStore) == true - request.content.cid == $verifiableBlock.cid + request.content.cid == verifiableBlock.cid request.content.merkleRoot == builder.verifyRoot.get.toBytes + + test "Should delete a single block": + let randomBlock = bt.Block.new("Random block".toBytes).tryGet() + (await localStore.putBlock(randomBlock)).tryGet() + check (await randomBlock.cid in localStore) == true + + (await node.delete(randomBlock.cid)).tryGet() + check (await randomBlock.cid in localStore) == false + + test "Should delete an entire dataset": + let + blocks = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) + manifest = await storeDataGetManifest(localStore, blocks) + manifestBlock = (await store.storeManifest(manifest)).tryGet() + manifestCid = manifestBlock.cid + + check await manifestCid in localStore + for blk in blocks: + check await blk.cid in localStore + + (await node.delete(manifestCid)).tryGet() + + check not await manifestCid in localStore + for blk in blocks: + check 
not (await blk.cid in localStore) diff --git a/tests/codex/sales/helpers/periods.nim b/tests/codex/sales/helpers/periods.nim index ba1793c2..99716cec 100644 --- a/tests/codex/sales/helpers/periods.nim +++ b/tests/codex/sales/helpers/periods.nim @@ -3,6 +3,6 @@ import ../../helpers/mockclock proc advanceToNextPeriod*(clock: MockClock, market: Market) {.async.} = let periodicity = await market.periodicity() - let period = periodicity.periodOf(clock.now().u256) + let period = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(period) - clock.set((periodEnd + 1).truncate(int)) + clock.set(periodEnd.toSecondsSince1970 + 1) diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index d2568b98..48f3e8a0 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -14,7 +14,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'cancelled'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testdownloading.nim b/tests/codex/sales/states/testdownloading.nim index e13ac53e..3df45749 100644 --- a/tests/codex/sales/states/testdownloading.nim +++ b/tests/codex/sales/states/testdownloading.nim @@ -10,7 +10,7 @@ import ../../helpers checksuite "sales state 'downloading'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var state: SaleDownloading setup: diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim index 9c8ee17a..07e325e3 100644 --- a/tests/codex/sales/states/testerrored.nim +++ b/tests/codex/sales/states/testerrored.nim @@ -14,7 +14,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'errored'": let request = 
StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index f8f77da6..04ff26db 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -16,7 +16,7 @@ import ../../helpers checksuite "sales state 'filled'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var market: MockMarket var slot: MockSlot @@ -36,7 +36,7 @@ checksuite "sales state 'filled'": market.requestEnds[request.id] = 321 onExpiryUpdatePassedExpiry = -1 let onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = onExpiryUpdatePassedExpiry = expiry return success() diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index f0ce7059..ce1d32f2 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -9,7 +9,7 @@ import ../../helpers checksuite "sales state 'filling'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var state: SaleFilling setup: diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim index 4b353014..0c33a7b3 100644 --- a/tests/codex/sales/states/testfinished.nim +++ b/tests/codex/sales/states/testfinished.nim @@ -15,7 +15,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'finished'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testignored.nim 
b/tests/codex/sales/states/testignored.nim index 1c808e8b..2e1c6e91 100644 --- a/tests/codex/sales/states/testignored.nim +++ b/tests/codex/sales/states/testignored.nim @@ -14,7 +14,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'ignored'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testinitialproving.nim b/tests/codex/sales/states/testinitialproving.nim index 97331a07..cae0a069 100644 --- a/tests/codex/sales/states/testinitialproving.nim +++ b/tests/codex/sales/states/testinitialproving.nim @@ -20,7 +20,7 @@ import ../helpers/periods asyncchecksuite "sales state 'initialproving'": let proof = Groth16Proof.example let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testpayout.nim b/tests/codex/sales/states/testpayout.nim index b1748b45..403c663f 100644 --- a/tests/codex/sales/states/testpayout.nim +++ b/tests/codex/sales/states/testpayout.nim @@ -15,7 +15,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'payout'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim index e78ee25e..99d9c7fe 100644 --- a/tests/codex/sales/states/testpreparing.nim +++ b/tests/codex/sales/states/testpreparing.nim @@ -22,7 +22,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'preparing'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let 
clock = MockClock.new() var agent: SalesAgent @@ -34,9 +34,9 @@ asyncchecksuite "sales state 'preparing'": setup: availability = Availability.init( - totalSize = request.ask.slotSize + 100.u256, - freeSize = request.ask.slotSize + 100.u256, - duration = request.ask.duration + 60.u256, + totalSize = request.ask.slotSize + 100.uint64, + freeSize = request.ask.slotSize + 100.uint64, + duration = request.ask.duration + 60.uint64, minPricePerBytePerSecond = request.ask.pricePerBytePerSecond, totalCollateral = request.ask.collateralPerSlot * request.ask.slots.u256, ) diff --git a/tests/codex/sales/states/testproving.nim b/tests/codex/sales/states/testproving.nim index afdeb4d2..6b7e7bd4 100644 --- a/tests/codex/sales/states/testproving.nim +++ b/tests/codex/sales/states/testproving.nim @@ -40,9 +40,9 @@ asyncchecksuite "sales state 'proving'": proc advanceToNextPeriod(market: Market) {.async.} = let periodicity = await market.periodicity() - let current = periodicity.periodOf(clock.now().u256) + let current = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(current) - clock.set(periodEnd.truncate(int64) + 1) + clock.set(periodEnd.toSecondsSince1970 + 1) test "switches to cancelled state when request expires": let next = state.onCancelled(request) diff --git a/tests/codex/sales/states/testsimulatedproving.nim b/tests/codex/sales/states/testsimulatedproving.nim index 1fc5331c..c8f4ae1d 100644 --- a/tests/codex/sales/states/testsimulatedproving.nim +++ b/tests/codex/sales/states/testsimulatedproving.nim @@ -56,9 +56,9 @@ asyncchecksuite "sales state 'simulated-proving'": proc advanceToNextPeriod(market: Market) {.async.} = let periodicity = await market.periodicity() - let current = periodicity.periodOf(clock.now().u256) + let current = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(current) - clock.set(periodEnd.truncate(int64) + 1) + clock.set(periodEnd.toSecondsSince1970 + 1) proc 
waitForProvingRounds(market: Market, rounds: int) {.async.} = var rnds = rounds - 1 # proof round runs prior to advancing diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index 1fd573fa..d9ecdfc8 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -19,7 +19,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'SlotReserving'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var market: MockMarket var clock: MockClock var agent: SalesAgent diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index 97730f49..5e9f81f9 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -16,7 +16,7 @@ import ../../helpers checksuite "sales state 'unknown'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let slotId = slotId(request.id, slotIndex) var market: MockMarket diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index a1c7d1a5..79fc3626 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -41,17 +41,17 @@ asyncchecksuite "Reservations module": proc createAvailability(): Availability = let example = Availability.example(collateralPerByte) - let totalSize = rand(100000 .. 200000).u256 - let totalCollateral = totalSize * collateralPerByte + let totalSize = rand(100000 .. 
200000).uint64 + let totalCollateral = totalSize.u256 * collateralPerByte let availability = waitFor reservations.createAvailability( totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral ) return availability.get proc createReservation(availability: Availability): Reservation = - let size = rand(1 ..< availability.freeSize.truncate(int)) + let size = rand(1 ..< availability.freeSize.int) let reservation = waitFor reservations.createReservation( - availability.id, size.u256, RequestId.example, UInt256.example, 1.u256 + availability.id, size.uint64, RequestId.example, uint64.example, 1.u256 ) return reservation.get @@ -64,8 +64,8 @@ asyncchecksuite "Reservations module": check (await reservations.all(Availability)).get.len == 0 test "generates unique ids for storage availability": - let availability1 = Availability.init(1.u256, 2.u256, 3.u256, 4.u256, 5.u256) - let availability2 = Availability.init(1.u256, 2.u256, 3.u256, 4.u256, 5.u256) + let availability1 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256) + let availability2 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256) check availability1.id != availability2.id test "can reserve available storage": @@ -75,7 +75,7 @@ asyncchecksuite "Reservations module": test "creating availability reserves bytes in repo": let orig = repo.available.uint let availability = createAvailability() - check repo.available.uint == (orig.u256 - availability.freeSize).truncate(uint) + check repo.available.uint == orig - availability.freeSize test "can get all availabilities": let availability1 = createAvailability() @@ -129,7 +129,7 @@ asyncchecksuite "Reservations module": test "cannot create reservation with non-existant availability": let availability = Availability.example let created = await reservations.createReservation( - availability.id, UInt256.example, RequestId.example, UInt256.example, 1.u256 + availability.id, uint64.example, RequestId.example, uint64.example, 1.u256 
) check created.isErr check created.error of NotExistsError @@ -140,7 +140,7 @@ asyncchecksuite "Reservations module": availability.id, availability.totalSize + 1, RequestId.example, - UInt256.example, + uint64.example, UInt256.example, ) check created.isErr @@ -153,12 +153,12 @@ asyncchecksuite "Reservations module": availability.id, availability.totalSize - 1, RequestId.example, - UInt256.example, + uint64.example, UInt256.example, ) let two = reservations.createReservation( - availability.id, availability.totalSize, RequestId.example, UInt256.example, + availability.id, availability.totalSize, RequestId.example, uint64.example, UInt256.example, ) @@ -228,7 +228,7 @@ asyncchecksuite "Reservations module": let reservation = createReservation(availability) let orig = availability.freeSize - reservation.size let origQuota = repo.quotaReservedBytes - let returnedBytes = reservation.size + 200.u256 + let returnedBytes = reservation.size + 200.uint64 check isOk await reservations.returnBytesToAvailability( reservation.availabilityId, reservation.id, returnedBytes @@ -238,7 +238,7 @@ asyncchecksuite "Reservations module": let updated = !(await reservations.get(key, Availability)) check updated.freeSize > orig - check (updated.freeSize - orig) == 200.u256 + check (updated.freeSize - orig) == 200.uint64 check (repo.quotaReservedBytes - origQuota) == 200.NBytes test "update releases quota when lowering size": @@ -271,14 +271,14 @@ asyncchecksuite "Reservations module": let availability = createAvailability() let reservation = createReservation(availability) let updated = await reservations.release( - reservation.id, reservation.availabilityId, (reservation.size + 1).truncate(uint) + reservation.id, reservation.availabilityId, reservation.size + 1 ) check updated.isErr check updated.error of BytesOutOfBoundsError test "cannot release bytes from non-existant reservation": let availability = createAvailability() - let reservation = createReservation(availability) + discard 
createReservation(availability) let updated = await reservations.release(ReservationId.example, availability.id, 1) check updated.isErr check updated.error of NotExistsError @@ -297,7 +297,7 @@ asyncchecksuite "Reservations module": var added: Availability reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = added = a - availability.freeSize += 1.u256 + availability.freeSize += 1 discard await reservations.update(availability) check added == availability @@ -307,7 +307,7 @@ asyncchecksuite "Reservations module": var called = false reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = called = true - availability.freeSize -= 1.u256 + availability.freeSize -= 1 discard await reservations.update(availability) check not called @@ -356,14 +356,11 @@ asyncchecksuite "Reservations module": check reservations.hasAvailable(DefaultQuotaBytes.uint - 1) test "reports quota not available to be reserved": - check not reservations.hasAvailable(DefaultQuotaBytes.uint + 1) + check not reservations.hasAvailable(DefaultQuotaBytes.uint64 + 1) test "fails to create availability with size that is larger than available quota": let created = await reservations.createAvailability( - (DefaultQuotaBytes.uint + 1).u256, - UInt256.example, - UInt256.example, - UInt256.example, + DefaultQuotaBytes.uint64 + 1, uint64.example, UInt256.example, UInt256.example ) check created.isErr check created.error of ReserveFailedError diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 0d441f34..f078cbee 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -36,18 +36,21 @@ asyncchecksuite "Sales - start": var repo: RepoStore var queue: SlotQueue var itemsProcessed: seq[SlotQueueItem] + var expiry: SecondsSince1970 setup: request = StorageRequest( ask: StorageAsk( slots: 4, - slotSize: 100.u256, - duration: 60.u256, + slotSize: 100.uint64, + duration: 60.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 
1.u256, ), - content: StorageContent(cid: "some cid"), - expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, + content: StorageContent( + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet + ), + expiry: (getTime() + initDuration(hours = 1)).toUnix.uint64, ) market = MockMarket.new() @@ -59,12 +62,12 @@ asyncchecksuite "Sales - start": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = return success() sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = return success() @@ -74,7 +77,8 @@ asyncchecksuite "Sales - start": ): Future[?!Groth16Proof] {.async.} = return success(proof) itemsProcessed = @[] - request.expiry = (clock.now() + 42).u256 + expiry = (clock.now() + 42) + request.expiry = expiry.uint64 teardown: await sales.stop() @@ -82,7 +86,7 @@ asyncchecksuite "Sales - start": await repoTmp.destroyDb() await metaTmp.destroyDb() - proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + proc fillSlot(slotIdx: uint64 = 0.uint64) {.async.} = let address = await market.getSigner() let slot = MockSlot(requestId: request.id, slotIndex: slotIdx, proof: proof, host: address) @@ -95,16 +99,15 @@ asyncchecksuite "Sales - start": request.ask.slots = 2 market.requested = @[request] market.requestState[request.id] = RequestState.New + market.requestExpiry[request.id] = expiry - let slot0 = - MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) + let slot0 = MockSlot(requestId: request.id, slotIndex: 0, proof: proof, host: me) await fillSlot(slot0.slotIndex) - let slot1 = - MockSlot(requestId: request.id, slotIndex: 1.u256, proof: proof, host: me) + let slot1 = MockSlot(requestId: request.id, slotIndex: 1, proof: proof, host: me) await 
fillSlot(slot1.slotIndex) - market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.activeSlots[me] = @[request.slotId(0), request.slotId(1)] market.requested = @[request] market.activeRequests[me] = @[request.id] @@ -112,10 +115,10 @@ asyncchecksuite "Sales - start": check eventually sales.agents.len == 2 check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.uint64 ) check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.uint64 ) asyncchecksuite "Sales": @@ -124,7 +127,7 @@ asyncchecksuite "Sales": repoTmp = TempLevelDb.new() metaTmp = TempLevelDb.new() - var totalAvailabilitySize: UInt256 + var totalAvailabilitySize: uint64 var minPricePerBytePerSecond: UInt256 var requestedCollateralPerByte: UInt256 var totalCollateral: UInt256 @@ -139,27 +142,29 @@ asyncchecksuite "Sales": var itemsProcessed: seq[SlotQueueItem] setup: - totalAvailabilitySize = 100.u256 + totalAvailabilitySize = 100.uint64 minPricePerBytePerSecond = 1.u256 requestedCollateralPerByte = 1.u256 - totalCollateral = requestedCollateralPerByte * totalAvailabilitySize + totalCollateral = requestedCollateralPerByte * totalAvailabilitySize.stuint(256) availability = Availability.init( totalSize = totalAvailabilitySize, freeSize = totalAvailabilitySize, - duration = 60.u256, + duration = 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) request = StorageRequest( ask: StorageAsk( slots: 4, - slotSize: 100.u256, - duration: 60.u256, + slotSize: 100.uint64, + duration: 60.uint64, pricePerBytePerSecond: minPricePerBytePerSecond, collateralPerByte: 1.u256, ), - content: StorageContent(cid: "some cid"), - expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, + content: 
StorageContent( + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet + ), + expiry: (getTime() + initDuration(hours = 1)).toUnix.uint64, ) market = MockMarket.new() @@ -176,12 +181,12 @@ asyncchecksuite "Sales": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = return success() sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = return success() @@ -281,13 +286,13 @@ asyncchecksuite "Sales": test "removes slot index from slot queue once SlotFilled emitted": let request1 = await addRequestToSaturatedQueue() - market.emitSlotFilled(request1.id, 1.u256) + market.emitSlotFilled(request1.id, 1.uint64) let expected = SlotQueueItem.init(request1, 1'u16) check always (not itemsProcessed.contains(expected)) test "removes slot index from slot queue once SlotReservationsFull emitted": let request1 = await addRequestToSaturatedQueue() - market.emitSlotReservationsFull(request1.id, 1.u256) + market.emitSlotReservationsFull(request1.id, 1.uint64) let expected = SlotQueueItem.init(request1, 1'u16) check always (not itemsProcessed.contains(expected)) @@ -298,7 +303,7 @@ asyncchecksuite "Sales": createAvailability() market.requested.add request # "contract" must be able to return request - market.emitSlotFreed(request.id, 2.u256) + market.emitSlotFreed(request.id, 2.uint64) let expected = SlotQueueItem.init(request, 2.uint16) check eventually itemsProcessed.contains(expected) @@ -343,10 +348,10 @@ asyncchecksuite "Sales": test "availability size is reduced by request slot size when fully downloaded": sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} 
= let blk = bt.Block.new(@[1.byte]).get - await onBatch(blk.repeat(request.ask.slotSize.truncate(int))) + await onBatch(blk.repeat(request.ask.slotSize.int)) createAvailability() await market.requestStorage(request) @@ -354,16 +359,16 @@ asyncchecksuite "Sales": availability.freeSize - request.ask.slotSize test "non-downloaded bytes are returned to availability once finished": - var slotIndex = 0.u256 + var slotIndex = 0.uint64 sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = slotIndex = slot let blk = bt.Block.new(@[1.byte]).get await onBatch(@[blk]) let sold = newFuture[void]() - sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) = + sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = sold.complete() createAvailability() @@ -374,7 +379,7 @@ asyncchecksuite "Sales": # complete request market.slotState[request.slotId(slotIndex)] = SlotState.Finished - clock.advance(request.ask.duration.truncate(int64)) + clock.advance(request.ask.duration.int64) check eventually getAvailability().freeSize == origSize - 1 @@ -406,17 +411,17 @@ asyncchecksuite "Sales": test "ignores request when slot state is not free": createAvailability() await market.requestStorage(request) - market.slotState[request.slotId(0.u256)] = SlotState.Filled - market.slotState[request.slotId(1.u256)] = SlotState.Filled - market.slotState[request.slotId(2.u256)] = SlotState.Filled - market.slotState[request.slotId(3.u256)] = SlotState.Filled + market.slotState[request.slotId(0.uint64)] = SlotState.Filled + market.slotState[request.slotId(1.uint64)] = SlotState.Filled + market.slotState[request.slotId(2.uint64)] = SlotState.Filled + market.slotState[request.slotId(3.uint64)] = SlotState.Filled check wasIgnored() test "retrieves and stores data locally": var storingRequest: StorageRequest - var storingSlot: UInt256 + var storingSlot: uint64 sales.onStore = 
proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = storingRequest = request storingSlot = slot @@ -424,29 +429,12 @@ asyncchecksuite "Sales": createAvailability() await market.requestStorage(request) check eventually storingRequest == request - check storingSlot < request.ask.slots.u256 - - test "handles errors during state run": - var saleFailed = false - sales.onProve = proc( - slot: Slot, challenge: ProofChallenge - ): Future[?!Groth16Proof] {.async.} = - # raise exception so machine.onError is called - raise newException(ValueError, "some error") - - # onClear is called in SaleErrored.run - sales.onClear = proc(request: StorageRequest, idx: UInt256) = - saleFailed = true - createAvailability() - await market.requestStorage(request) - await allowRequestToStart() - - check eventually saleFailed + check storingSlot < request.ask.slots test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = return failure(error) createAvailability() @@ -455,7 +443,7 @@ asyncchecksuite "Sales": test "generates proof of storage": var provingRequest: StorageRequest - var provingSlot: UInt256 + var provingSlot: uint64 sales.onProve = proc( slot: Slot, challenge: ProofChallenge ): Future[?!Groth16Proof] {.async.} = @@ -467,7 +455,7 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually provingRequest == request - check provingSlot < request.ask.slots.u256 + check provingSlot < request.ask.slots test "fills a slot": createAvailability() @@ -476,14 +464,14 @@ asyncchecksuite "Sales": check eventually market.filled.len > 0 check market.filled[0].requestId == request.id - check market.filled[0].slotIndex < request.ask.slots.u256 + 
check market.filled[0].slotIndex < request.ask.slots check market.filled[0].proof == proof check market.filled[0].host == await market.getSigner() test "calls onFilled when slot is filled": var soldRequest = StorageRequest.default - var soldSlotIndex = UInt256.high - sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) = + var soldSlotIndex = uint64.high + sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = soldRequest = request soldSlotIndex = slotIndex createAvailability() @@ -491,7 +479,7 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually soldRequest == request - check soldSlotIndex < request.ask.slots.u256 + check soldSlotIndex < request.ask.slots test "calls onClear when storage becomes available again": # fail the proof intentionally to trigger `agent.finish(success=false)`, @@ -501,8 +489,8 @@ asyncchecksuite "Sales": ): Future[?!Groth16Proof] {.async.} = raise newException(IOError, "proof failed") var clearedRequest: StorageRequest - var clearedSlotIndex: UInt256 - sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = + var clearedSlotIndex: uint64 + sales.onClear = proc(request: StorageRequest, slotIndex: uint64) = clearedRequest = request clearedSlotIndex = slotIndex createAvailability() @@ -510,19 +498,19 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually clearedRequest == request - check clearedSlotIndex < request.ask.slots.u256 + check clearedSlotIndex < request.ask.slots test "makes storage available again when other host fills the slot": let otherHost = Address.example sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() createAvailability() await market.requestStorage(request) for slotIndex in 0 ..< request.ask.slots: - market.fillSlot(request.id, slotIndex.u256, proof, otherHost) + 
market.fillSlot(request.id, slotIndex.uint64, proof, otherHost) check eventually (await reservations.all(Availability)).get == @[availability] test "makes storage available again when request expires": @@ -531,7 +519,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -551,12 +539,12 @@ asyncchecksuite "Sales": # ensure only one slot, otherwise once bytes are returned to the # availability, the queue will be unpaused and availability will be consumed # by other slots - request.ask.slots = 1.uint64 + request.ask.slots = 1 market.requestExpiry[request.id] = expiry let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -583,21 +571,19 @@ asyncchecksuite "Sales": market.requestState[request.id] = RequestState.New market.requestEnds[request.id] = request.expiry.toSecondsSince1970 - proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + proc fillSlot(slotIdx: uint64 = 0) {.async.} = let address = await market.getSigner() let slot = MockSlot(requestId: request.id, slotIndex: slotIdx, proof: proof, host: address) market.filled.add slot market.slotState[slotId(request.id, slotIdx)] = SlotState.Filled - let slot0 = - MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) + let slot0 = MockSlot(requestId: request.id, slotIndex: 0, proof: proof, host: me) await fillSlot(slot0.slotIndex) - let slot1 = - MockSlot(requestId: request.id, slotIndex: 1.u256, proof: proof, host: me) + let slot1 = MockSlot(requestId: request.id, slotIndex: 1, proof: proof, host: me) await fillSlot(slot1.slotIndex) - 
market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.activeSlots[me] = @[request.slotId(0), request.slotId(1)] market.requested = @[request] market.activeRequests[me] = @[request.id] @@ -605,16 +591,16 @@ asyncchecksuite "Sales": check eventually sales.agents.len == 2 check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.uint64 ) check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.uint64 ) test "deletes inactive reservations on load": createAvailability() discard await reservations.createReservation( - availability.id, 100.u256, RequestId.example, UInt256.example, UInt256.example + availability.id, 100.uint64, RequestId.example, 0.uint64, UInt256.example ) check (await reservations.all(Reservation)).get.len == 1 await sales.load() diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index f17711d3..c795904d 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -4,7 +4,6 @@ import pkg/codex/sales import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/sales/statemachine -import pkg/codex/sales/states/errorhandling import ../../asynctest import ../helpers/mockmarket @@ -15,18 +14,12 @@ import ../examples var onCancelCalled = false var onFailedCalled = false var onSlotFilledCalled = false -var onErrorCalled = false -type - MockState = ref object of SaleState - MockErrorState = ref object of ErrorHandlingState +type MockState = ref object of SaleState method `$`*(state: MockState): string = "MockState" -method `$`*(state: MockErrorState): string = - "MockErrorState" - method onCancelled*(state: MockState, request: StorageRequest): ?State = onCancelCalled = true @@ -34,31 +27,24 @@ 
method onFailed*(state: MockState, request: StorageRequest): ?State = onFailedCalled = true method onSlotFilled*( - state: MockState, requestId: RequestId, slotIndex: UInt256 + state: MockState, requestId: RequestId, slotIndex: uint64 ): ?State = onSlotFilledCalled = true -method onError*(state: MockErrorState, err: ref CatchableError): ?State = - onErrorCalled = true - -method run*(state: MockErrorState, machine: Machine): Future[?State] {.async.} = - raise newException(ValueError, "failure") - asyncchecksuite "Sales agent": let request = StorageRequest.example var agent: SalesAgent var context: SalesContext - var slotIndex: UInt256 + var slotIndex: uint64 var market: MockMarket var clock: MockClock setup: market = MockMarket.new() - market.requestExpiry[request.id] = - getTime().toUnix() + request.expiry.truncate(int64) + market.requestExpiry[request.id] = getTime().toUnix() + request.expiry.int64 clock = MockClock.new() context = SalesContext(market: market, clock: clock) - slotIndex = 0.u256 + slotIndex = 0.uint64 onCancelCalled = false onFailedCalled = false onSlotFilledCalled = false @@ -123,7 +109,9 @@ asyncchecksuite "Sales agent": agent.start(MockState.new()) await agent.subscribe() agent.onFulfilled(request.id) - check eventually agent.data.cancelled.cancelled() + # Note: futures that are cancelled, and do not re-raise the CancelledError + # will have a state of completed, not cancelled. 
+ check eventually agent.data.cancelled.completed() test "current state onFailed called when onFailed called": agent.start(MockState.new()) @@ -134,7 +122,3 @@ asyncchecksuite "Sales agent": agent.start(MockState.new()) agent.onSlotFilled(request.id, slotIndex) check eventually onSlotFilledCalled - - test "ErrorHandlingState.onError can be overridden at the state level": - agent.start(MockErrorState.new()) - check eventually onErrorCalled diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 2e0759ee..46c35b1c 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -146,18 +146,18 @@ suite "Slot queue": test "correctly compares SlotQueueItems": var requestA = StorageRequest.example - requestA.ask.duration = 1.u256 + requestA.ask.duration = 1.uint64 requestA.ask.pricePerBytePerSecond = 1.u256 - check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize + check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize.u256 requestA.ask.collateralPerByte = 100000.u256 - requestA.expiry = 1001.u256 + requestA.expiry = 1001.uint64 var requestB = StorageRequest.example - requestB.ask.duration = 100.u256 + requestB.ask.duration = 100.uint64 requestB.ask.pricePerBytePerSecond = 1000.u256 - check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize + check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize.u256 requestB.ask.collateralPerByte = 1.u256 - requestB.expiry = 1000.u256 + requestB.expiry = 1000.uint64 let itemA = SlotQueueItem.init(requestA, 0) let itemB = SlotQueueItem.init(requestB, 0) @@ -169,21 +169,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 2.u256, # profitability is higher (good) collateralPerByte: 1.u256, - expiry: 1.u256, + expiry: 1.uint64, seen: true, # seen (bad), more weight than 
profitability ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, # profitability is lower (bad) collateralPerByte: 1.u256, - expiry: 1.u256, + expiry: 1.uint64, seen: false, # not seen (good) ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # B higher priority than A @@ -194,22 +194,22 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, # reward is lower (bad) collateralPerByte: 1.u256, # collateral is lower (good) - expiry: 1.u256, + expiry: 1.uint64, seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 2.u256, # reward is higher (good), more weight than collateral collateralPerByte: 2.u256, # collateral is higher (bad) - expiry: 1.u256, + expiry: 1.uint64, seen: false, ) @@ -220,21 +220,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 2.u256, # collateral is higher (bad) - expiry: 2.u256, # expiry is longer (good) + expiry: 2.uint64, # expiry is longer (good) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry - expiry: 1.u256, # expiry is shorter (bad) + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -245,21 +245,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, # 
slotSize is smaller (good) - duration: 1.u256, + slotSize: 1.uint64, # slotSize is smaller (good) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 1.u256, # expiry is shorter (bad) + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 2.u256, # slotSize is larger (bad) - duration: 1.u256, + slotSize: 2.uint64, # slotSize is larger (bad) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 2.u256, # expiry is longer (good), more weight than slotSize + expiry: 2.uint64, # expiry is longer (good), more weight than slotSize seen: false, ) @@ -270,21 +270,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 2.u256, # slotSize is larger (bad) - duration: 1.u256, + slotSize: 2.uint64, # slotSize is larger (bad) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 1.u256, # expiry is shorter (bad) + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, # slotSize is smaller (good) - duration: 1.u256, + slotSize: 1.uint64, # slotSize is smaller (good) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 1.u256, + expiry: 1.uint64, seen: false, ) @@ -460,14 +460,14 @@ suite "Slot queue": test "sorts items by expiry descending (longer expiry = higher priority)": var request = StorageRequest.example let item0 = SlotQueueItem.init(request, 0) - request.expiry += 1.u256 + request.expiry += 1 let item1 = SlotQueueItem.init(request, 1) check item1 < item0 test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": var request = StorageRequest.example let item0 = SlotQueueItem.init(request, 0) - request.ask.slotSize += 1.u256 + request.ask.slotSize += 1 let item1 
= SlotQueueItem.init(request, 1) check item1 < item0 diff --git a/tests/codex/slots/helpers.nim b/tests/codex/slots/helpers.nim index 03d87d12..fced1f1c 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -15,9 +15,7 @@ import pkg/codex/rng import ../helpers -proc storeManifest*( - store: BlockStore, manifest: Manifest -): Future[?!bt.Block] {.async.} = +proc makeManifestBlock*(manifest: Manifest): ?!bt.Block = without encodedVerifiable =? manifest.encode(), err: trace "Unable to encode manifest" return failure(err) @@ -26,6 +24,15 @@ proc storeManifest*( trace "Unable to create block from manifest" return failure(error) + success blk + +proc storeManifest*( + store: BlockStore, manifest: Manifest +): Future[?!bt.Block] {.async.} = + without blk =? makeManifestBlock(manifest), err: + trace "Unable to create manifest block", err = err.msg + return failure(err) + if err =? (await store.putBlock(blk)).errorOption: trace "Unable to store manifest block", cid = blk.cid, err = err.msg return failure(err) diff --git a/tests/codex/stores/testqueryiterhelper.nim b/tests/codex/stores/testqueryiterhelper.nim index 5d3d68fd..4e83dad4 100644 --- a/tests/codex/stores/testqueryiterhelper.nim +++ b/tests/codex/stores/testqueryiterhelper.nim @@ -1,6 +1,6 @@ import std/sugar -import pkg/stew/results +import pkg/results import pkg/questionable import pkg/chronos import pkg/datastore/typedds diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index dda4ed82..0279b56f 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -12,9 +12,11 @@ import pkg/datastore import pkg/codex/stores/cachestore import pkg/codex/chunker import pkg/codex/stores +import pkg/codex/stores/repostore/operations import pkg/codex/blocktype as bt import pkg/codex/clock import pkg/codex/utils/asynciter +import pkg/codex/merkletree/codex import ../../asynctest import ../helpers @@ -354,6 +356,119 @@ 
asyncchecksuite "RepoStore": check has.isOk check has.get + test "should set the reference count for orphan blocks to 0": + let blk = Block.example(size = 200) + (await repo.putBlock(blk)).tryGet() + check (await repo.blockRefCount(blk.cid)).tryGet() == 0.Natural + + test "should not allow non-orphan blocks to be deleted directly": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(0).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + let err = (await repo.delBlock(blk.cid)).error() + check err.msg == + "Directly deleting a block that is part of a dataset is not allowed." + + test "should allow non-orphan blocks to be deleted by dataset reference": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(0).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + check not (await blk.cid in repo) + + test "should not delete a non-orphan block until it is deleted from all parent datasets": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + blockPool = await makeRandomBlocks(datasetSize = 768, blockSize = 256'nb) + + let + dataset1 = @[blockPool[0], blockPool[1]] + dataset2 = @[blockPool[1], blockPool[2]] + + let sharedBlock = blockPool[1] + + let + (manifest1, tree1) = makeManifestAndTree(dataset1).tryGet() + treeCid1 = tree1.rootCid.tryGet() + (manifest2, 
tree2) = makeManifestAndTree(dataset2).tryGet() + treeCid2 = tree2.rootCid.tryGet() + + (await repo.putBlock(sharedBlock)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 0.Natural + + let + proof1 = tree1.getProof(1).tryGet() + proof2 = tree2.getProof(0).tryGet() + + (await repo.putCidAndProof(treeCid1, 1, sharedBlock.cid, proof1)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 1.Natural + + (await repo.putCidAndProof(treeCid2, 0, sharedBlock.cid, proof2)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 2.Natural + + (await repo.delBlock(treeCid1, 1.Natural)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 1.Natural + check (await sharedBlock.cid in repo) + + (await repo.delBlock(treeCid2, 0.Natural)).tryGet() + check not (await sharedBlock.cid in repo) + + test "should clear leaf metadata when block is deleted from dataset": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(1).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0.Natural, blk.cid, proof)).tryGet() + + discard (await repo.getLeafMetadata(treeCid, 0.Natural)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + + let err = (await repo.getLeafMetadata(treeCid, 0.Natural)).error() + check err of BlockNotFoundError + + test "should not fail when reinserting and deleting a previously deleted block (bug #1108)": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = 
tree.getProof(1).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + (await repo.putBlock(blk)).tryGet() + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + commonBlockStoreTests( "RepoStore Sql backend", proc(): BlockStore = diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index eb3767cd..a9c6769b 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -1,5 +1,5 @@ import pkg/chronos -import pkg/stew/results +import pkg/results import pkg/codex/utils/asyncheapqueue import pkg/codex/rng diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 952497e9..d469b379 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -1,5 +1,6 @@ import std/sequtils import std/sugar +import std/times import pkg/chronos import pkg/questionable/results @@ -11,6 +12,8 @@ import pkg/codex/blocktype as bt import pkg/codex/rng import pkg/codex/utils import pkg/codex/indexingstrategy +import pkg/taskpools +import pkg/codex/utils/arrayutils import ../asynctest import ./helpers @@ -27,6 +30,7 @@ suite "Erasure encode/decode": var erasure: Erasure let repoTmp = TempLevelDb.new() let metaTmp = TempLevelDb.new() + var taskpool: Taskpool setup: let @@ -35,12 +39,14 @@ suite "Erasure encode/decode": rng = Rng.instance() chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize) store = RepoStore.new(repoDs, metaDs) - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + taskpool = Taskpool.new() + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) manifest = await storeDataGetManifest(store, chunker) teardown: await repoTmp.destroyDb() await metaTmp.destroyDb() + taskpool.shutdown() proc encode(buffers, parity: int): Future[Manifest] {.async.} = let encoded = @@ -212,7 +218,7 @@ suite "Erasure 
encode/decode": let present = await store.hasBlock(manifest.treeCid, d) check present.tryGet() - test "handles edge case of 0 parity blocks": + test "Handles edge case of 0 parity blocks": const buffers = 20 parity = 0 @@ -221,6 +227,43 @@ suite "Erasure encode/decode": discard (await erasure.decode(encoded)).tryGet() + test "Should concurrently encode/decode multiple datasets": + const iterations = 2 + + let + datasetSize = 1.MiBs + ecK = 10.Natural + ecM = 10.Natural + + var encodeTasks = newSeq[Future[?!Manifest]]() + var decodeTasks = newSeq[Future[?!Manifest]]() + var manifests = newSeq[Manifest]() + for i in 0 ..< iterations: + let + # create random data and store it + blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs)) + chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize) + manifest = await storeDataGetManifest(store, chunker) + manifests.add(manifest) + # encode the data concurrently + encodeTasks.add(erasure.encode(manifest, ecK, ecM)) + # wait for all encoding tasks to finish + let encodeResults = await allFinished(encodeTasks) + # decode the data concurrently + for i in 0 ..< encodeResults.len: + decodeTasks.add(erasure.decode(encodeResults[i].read().tryGet())) + # wait for all decoding tasks to finish + let decodeResults = await allFinished(decodeTasks) # TODO: use allFutures + + for j in 0 ..< decodeTasks.len: + let + decoded = decodeResults[j].read().tryGet() + encoded = encodeResults[j].read().tryGet() + check: + decoded.treeCid == manifests[j].treeCid + decoded.treeCid == encoded.originalTreeCid + decoded.blocksCount == encoded.originalBlocksCount + test "Should handle verifiable manifests": const buffers = 20 @@ -259,3 +302,73 @@ suite "Erasure encode/decode": decoded.treeCid == manifest.treeCid decoded.treeCid == encoded.originalTreeCid decoded.blocksCount == encoded.originalBlocksCount + + test "Should complete encode/decode task when cancelled": + let + blocksLen = 10000 + parityLen = 10 + data = 
seq[seq[byte]].new() + chunker = RandomChunker.new( + rng, size = (blocksLen * BlockSize.int), chunkSize = BlockSize + ) + + data[].setLen(blocksLen) + + for i in 0 ..< blocksLen: + let chunk = await chunker.getBytes() + shallowCopy(data[i], @(chunk)) + + let + parity = createDoubleArray(parityLen, BlockSize.int) + paritySeq = seq[seq[byte]].new() + recovered = createDoubleArray(blocksLen, BlockSize.int) + cancelledTaskParity = createDoubleArray(parityLen, BlockSize.int) + cancelledTaskRecovered = createDoubleArray(blocksLen, BlockSize.int) + + paritySeq[].setLen(parityLen) + defer: + freeDoubleArray(parity, parityLen) + freeDoubleArray(cancelledTaskParity, parityLen) + freeDoubleArray(recovered, blocksLen) + freeDoubleArray(cancelledTaskRecovered, blocksLen) + + for i in 0 ..< parityLen: + paritySeq[i] = cast[seq[byte]](parity[i]) + + # call encodeAsync to get the parity + let encFut = + await erasure.encodeAsync(BlockSize.int, blocksLen, parityLen, data, parity) + check encFut.isOk + + let decFut = await erasure.decodeAsync( + BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered + ) + check decFut.isOk + + # call encodeAsync and cancel the task + let encodeFut = erasure.encodeAsync( + BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity + ) + encodeFut.cancel() + + try: + discard await encodeFut + except CatchableError as exc: + check exc of CancelledError + finally: + for i in 0 ..< parityLen: + check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int) + + # call decodeAsync and cancel the task + let decodeFut = erasure.decodeAsync( + BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered + ) + decodeFut.cancel() + + try: + discard await decodeFut + except CatchableError as exc: + check exc of CancelledError + finally: + for i in 0 ..< blocksLen: + check equalMem(recovered[i], cancelledTaskRecovered[i], BlockSize.int) diff --git a/tests/codex/testnat.nim b/tests/codex/testnat.nim index 57f51d31..3981b2e6 100644 
--- a/tests/codex/testnat.nim +++ b/tests/codex/testnat.nim @@ -1,7 +1,7 @@ import std/[unittest, options, net], stew/shims/net as stewNet import pkg/chronos import pkg/libp2p/[multiaddress, multihash, multicodec] -import pkg/stew/results +import pkg/results import ../../codex/nat import ../../codex/utils/natutils diff --git a/tests/codex/testpurchasing.nim b/tests/codex/testpurchasing.nim index bbab4197..5a4e85e9 100644 --- a/tests/codex/testpurchasing.nim +++ b/tests/codex/testpurchasing.nim @@ -28,8 +28,8 @@ asyncchecksuite "Purchasing": request = StorageRequest( ask: StorageAsk( slots: uint8.example.uint64, - slotSize: uint32.example.u256, - duration: uint16.example.u256, + slotSize: uint32.example.uint64, + duration: uint16.example.uint64, pricePerBytePerSecond: uint8.example.u256, ) ) @@ -100,7 +100,6 @@ asyncchecksuite "Purchasing": market.requestExpiry[populatedRequest.id] = expiry let purchase = await purchasing.purchase(populatedRequest) check eventually market.requested.len > 0 - let request = market.requested[0] clock.set(expiry + 1) expect PurchaseTimeout: @@ -130,8 +129,8 @@ checksuite "Purchasing state machine": request = StorageRequest( ask: StorageAsk( slots: uint8.example.uint64, - slotSize: uint32.example.u256, - duration: uint16.example.u256, + slotSize: uint32.example.uint64, + duration: uint16.example.uint64, pricePerBytePerSecond: uint8.example.u256, ) ) @@ -185,7 +184,7 @@ checksuite "Purchasing state machine": test "moves to PurchaseStarted when request state is Started": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 market.requested = @[request] market.requestState[request.id] = RequestState.Started let next = await PurchaseUnknown().run(purchase) @@ -218,7 +217,7 @@ checksuite "Purchasing state machine": test "moves to PurchaseFailed 
state once RequestFailed emitted": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 let future = PurchaseStarted().run(purchase) market.emitRequestFailed(request.id) @@ -229,10 +228,10 @@ checksuite "Purchasing state machine": test "moves to PurchaseFinished state once request finishes": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 let future = PurchaseStarted().run(purchase) - clock.advance(request.ask.duration.truncate(int64) + 1) + clock.advance(request.ask.duration.int64 + 1) let next = await future check !next of PurchaseFinished diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim index 95d913c3..30d6e3f3 100644 --- a/tests/codex/testvalidation.nim +++ b/tests/codex/testvalidation.nim @@ -16,8 +16,8 @@ logScope: topics = "testValidation" asyncchecksuite "validation": - let period = 10 - let timeout = 5 + let period = 10.uint64 + let timeout = 5.uint64 let maxSlots = MaxSlots(100) let validationGroups = ValidationGroups(8).some let slot = Slot.example @@ -51,8 +51,8 @@ asyncchecksuite "validation": groupIndex = groupIndexForSlotId(slot.id, !validationGroups) clock = MockClock.new() market = MockMarket.new(clock = Clock(clock).some) - market.config.proofs.period = period.u256 - market.config.proofs.timeout = timeout.u256 + market.config.proofs.period = period + market.config.proofs.timeout = timeout validation = newValidation(clock, market, maxSlots, validationGroups, groupIndex) teardown: @@ -60,10 +60,10 @@ asyncchecksuite "validation": await validation.stop() proc advanceToNextPeriod() = - let periodicity = Periodicity(seconds: 
period.u256) - let period = periodicity.periodOf(clock.now().u256) + let periodicity = Periodicity(seconds: period) + let period = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(period) - clock.set((periodEnd + 1).truncate(int)) + clock.set(periodEnd.toSecondsSince1970 + 1) test "the list of slots that it's monitoring is empty initially": check validation.slots.len == 0 diff --git a/tests/codex/utils/testasyncstatemachine.nim b/tests/codex/utils/testasyncstatemachine.nim index 40a040c4..ed3ea747 100644 --- a/tests/codex/utils/testasyncstatemachine.nim +++ b/tests/codex/utils/testasyncstatemachine.nim @@ -10,9 +10,8 @@ type State1 = ref object of State State2 = ref object of State State3 = ref object of State - State4 = ref object of State -var runs, cancellations, errors = [0, 0, 0, 0] +var runs, cancellations = [0, 0, 0, 0] method `$`(state: State1): string = "State1" @@ -23,28 +22,20 @@ method `$`(state: State2): string = method `$`(state: State3): string = "State3" -method `$`(state: State4): string = - "State4" - -method run(state: State1, machine: Machine): Future[?State] {.async.} = +method run(state: State1, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[0] return some State(State2.new()) -method run(state: State2, machine: Machine): Future[?State] {.async.} = +method run(state: State2, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[1] try: await sleepAsync(1.hours) except CancelledError: inc cancellations[1] - raise -method run(state: State3, machine: Machine): Future[?State] {.async.} = +method run(state: State3, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[2] -method run(state: State4, machine: Machine): Future[?State] {.async.} = - inc runs[3] - raise newException(ValueError, "failed") - method onMoveToNextStateEvent*(state: State): ?State {.base, upraises: [].} = discard @@ -54,19 +45,6 @@ method onMoveToNextStateEvent(state: State2): ?State = method 
onMoveToNextStateEvent(state: State3): ?State = some State(State1.new()) -method onError(state: State1, error: ref CatchableError): ?State = - inc errors[0] - -method onError(state: State2, error: ref CatchableError): ?State = - inc errors[1] - -method onError(state: State3, error: ref CatchableError): ?State = - inc errors[2] - -method onError(state: State4, error: ref CatchableError): ?State = - inc errors[3] - some State(State2.new()) - asyncchecksuite "async state machines": var machine: Machine @@ -76,7 +54,6 @@ asyncchecksuite "async state machines": setup: runs = [0, 0, 0, 0] cancellations = [0, 0, 0, 0] - errors = [0, 0, 0, 0] machine = Machine.new() test "should call run on start state": @@ -112,16 +89,6 @@ asyncchecksuite "async state machines": check runs == [0, 1, 0, 0] check cancellations == [0, 1, 0, 0] - test "forwards errors to error handler": - machine.start(State4.new()) - check eventually errors == [0, 0, 0, 1] and runs == [0, 1, 0, 1] - - test "error handler ignores CancelledError": - machine.start(State2.new()) - machine.schedule(moveToNextStateEvent) - check eventually cancellations == [0, 1, 0, 0] - check errors == [0, 0, 0, 0] - test "queries properties of the current state": proc description(state: State): string = $state diff --git a/tests/contracts/helpers/mockprovider.nim b/tests/contracts/helpers/mockprovider.nim index 09e65398..c5be8ad7 100644 --- a/tests/contracts/helpers/mockprovider.nim +++ b/tests/contracts/helpers/mockprovider.nim @@ -13,7 +13,7 @@ type MockProvider* = ref object of Provider method getBlock*( provider: MockProvider, tag: BlockTag -): Future[?Block] {.async: (raises: [ProviderError]).} = +): Future[?Block] {.async: (raises: [ProviderError, CancelledError]).} = try: if tag == BlockTag.latest: if latestBlock =? 
provider.latest: diff --git a/tests/contracts/testContracts.nim b/tests/contracts/testContracts.nim index 3af63ac1..84708ecd 100644 --- a/tests/contracts/testContracts.nim +++ b/tests/contracts/testContracts.nim @@ -49,28 +49,29 @@ ethersuite "Marketplace contracts": switchAccount(host) discard await token.approve(marketplace.address, request.ask.collateralPerSlot).confirm(1) - discard await marketplace.reserveSlot(request.id, 0.u256).confirm(1) - let receipt = await marketplace.fillSlot(request.id, 0.u256, proof).confirm(1) + discard await marketplace.reserveSlot(request.id, 0.uint64).confirm(1) + let receipt = await marketplace.fillSlot(request.id, 0.uint64, proof).confirm(1) filledAt = await ethProvider.blockTime(BlockTag.init(!receipt.blockNumber)) - slotId = request.slotId(0.u256) + slotId = request.slotId(0.uint64) proc waitUntilProofRequired(slotId: SlotId) {.async.} = - let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod)) + let currentPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod).u256) while not ( (await marketplace.isProofRequired(slotId)) and (await marketplace.getPointer(slotId)) < 250 ) : - await ethProvider.advanceTime(periodicity.seconds) + await ethProvider.advanceTime(periodicity.seconds.u256) proc startContract() {.async.} = for slotIndex in 1 ..< request.ask.slots: discard await token .approve(marketplace.address, request.ask.collateralPerSlot) .confirm(1) - discard await marketplace.reserveSlot(request.id, slotIndex.u256).confirm(1) - discard await marketplace.fillSlot(request.id, slotIndex.u256, proof).confirm(1) + discard await marketplace.reserveSlot(request.id, slotIndex.uint64).confirm(1) + discard await marketplace.fillSlot(request.id, slotIndex.uint64, proof).confirm(1) test "accept marketplace proofs": switchAccount(host) @@ -80,9 +81,10 @@ 
ethersuite "Marketplace contracts": test "can mark missing proofs": switchAccount(host) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) let endOfPeriod = periodicity.periodEnd(missingPeriod) - await ethProvider.advanceTimeTo(endOfPeriod + 1) + await ethProvider.advanceTimeTo(endOfPeriod.u256 + 1) switchAccount(client) discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) @@ -123,7 +125,8 @@ ethersuite "Marketplace contracts": let expiry = await marketplace.requestExpiry(request.id) await ethProvider.advanceTimeTo((expiry + 1).u256) switchAccount(client) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTime(periodicity.seconds) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTime(periodicity.seconds.u256) expect Marketplace_SlotNotAcceptingProofs: discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) diff --git a/tests/contracts/testDeployment.nim b/tests/contracts/testDeployment.nim index a439e42a..86a5fe00 100644 --- a/tests/contracts/testDeployment.nim +++ b/tests/contracts/testDeployment.nim @@ -12,7 +12,7 @@ type MockProvider = ref object of Provider method getChainId*( provider: MockProvider -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = return provider.chainId proc configFactory(): CodexConf = diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index a77c2aaa..74d6a65e 100644 --- a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -3,6 +3,8 @@ import std/importutils import pkg/chronos import pkg/ethers/erc20 import codex/contracts +import pkg/libp2p/cid +import pkg/lrucache import ../ethertest 
import ./examples import ./time @@ -23,7 +25,7 @@ ethersuite "On-Chain Market": var marketplace: Marketplace var token: Erc20Token var request: StorageRequest - var slotIndex: UInt256 + var slotIndex: uint64 var periodicity: Periodicity var host: Signer var otherHost: Signer @@ -56,11 +58,12 @@ ethersuite "On-Chain Market": host = ethProvider.getSigner(accounts[1]) otherHost = ethProvider.getSigner(accounts[3]) - slotIndex = (request.ask.slots div 2).u256 + slotIndex = request.ask.slots div 2 proc advanceToNextPeriod() {.async.} = - let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod) + 1) + let currentPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTimeTo((periodicity.periodEnd(currentPeriod) + 1).u256) proc advanceToCancelledRequest(request: StorageRequest) {.async.} = let expiry = (await market.requestExpiresAt(request.id)) + 1 @@ -123,12 +126,13 @@ ethersuite "On-Chain Market": test "supports request subscriptions": var receivedIds: seq[RequestId] var receivedAsks: seq[StorageAsk] - proc onRequest(id: RequestId, ask: StorageAsk, expiry: UInt256) = + proc onRequest(id: RequestId, ask: StorageAsk, expiry: uint64) = receivedIds.add(id) receivedAsks.add(ask) let subscription = await market.subscribeRequests(onRequest) await market.requestStorage(request) + check eventually receivedIds == @[request.id] and receivedAsks == @[request.ask] await subscription.unsubscribe() @@ -170,7 +174,8 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() await 
market.markProofAsMissing(slotId, missingPeriod) check (await marketplace.missingProofs(slotId)) == 1 @@ -181,15 +186,16 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() check (await market.canProofBeMarkedAsMissing(slotId, missingPeriod)) == true test "supports slot filled subscriptions": await market.requestStorage(request) var receivedIds: seq[RequestId] - var receivedSlotIndices: seq[UInt256] - proc onSlotFilled(id: RequestId, slotIndex: UInt256) = + var receivedSlotIndices: seq[uint64] + proc onSlotFilled(id: RequestId, slotIndex: uint64) = receivedIds.add(id) receivedSlotIndices.add(slotIndex) @@ -204,8 +210,8 @@ ethersuite "On-Chain Market": test "subscribes only to a certain slot": var otherSlot = slotIndex - 1 await market.requestStorage(request) - var receivedSlotIndices: seq[UInt256] - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + var receivedSlotIndices: seq[uint64] + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = receivedSlotIndices.add(slotIndex) let subscription = @@ -222,8 +228,8 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) var receivedRequestIds: seq[RequestId] = @[] - var receivedIdxs: seq[UInt256] = @[] - proc onSlotFreed(requestId: RequestId, idx: UInt256) = + var receivedIdxs: seq[uint64] = @[] + proc onSlotFreed(requestId: RequestId, idx: uint64) = receivedRequestIds.add(requestId) receivedIdxs.add(idx) @@ -241,8 +247,8 @@ ethersuite "On-Chain Market": await market.requestStorage(request) var receivedRequestIds: seq[RequestId] = @[] - var 
receivedIdxs: seq[UInt256] = @[] - proc onSlotReservationsFull(requestId: RequestId, idx: UInt256) = + var receivedIdxs: seq[uint64] = @[] + proc onSlotReservationsFull(requestId: RequestId, idx: uint64) = receivedRequestIds.add(requestId) receivedIdxs.add(idx) @@ -268,9 +274,9 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeFulfillment(request.id, onFulfillment) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) check eventually receivedIds == @[request.id] await subscription.unsubscribe() @@ -289,14 +295,14 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeFulfillment(request.id, onFulfillment) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) for slotIndex in 0 ..< otherRequest.ask.slots: - await market.reserveSlot(otherRequest.id, slotIndex.u256) + await market.reserveSlot(otherRequest.id, slotIndex.uint64) await market.fillSlot( - otherRequest.id, slotIndex.u256, proof, otherRequest.ask.collateralPerSlot + otherRequest.id, slotIndex.uint64, proof, otherRequest.ask.collateralPerSlot ) check eventually receivedIds == @[request.id] @@ -328,18 +334,19 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeRequestFailed(request.id, onRequestFailed) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, 
request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) for slotIndex in 0 .. request.ask.maxSlotLoss: - let slotId = request.slotId(slotIndex.u256) + let slotId = request.slotId(slotIndex.uint64) while true: let slotState = await marketplace.slotState(slotId) if slotState == SlotState.Repair or slotState == SlotState.Failed: break await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) check eventually receivedIds == @[request.id] @@ -393,9 +400,9 @@ ethersuite "On-Chain Market": test "can retrieve request state": await market.requestStorage(request) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) check (await market.requestState(request.id)) == some RequestState.Started @@ -458,13 +465,12 @@ ethersuite "On-Chain Market": test "can query past SlotFilled events": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) - let slotId = request.slotId(slotIndex) + await market.reserveSlot(request.id, 0.uint64) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 
0.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 2.uint64, proof, request.ask.collateralPerSlot) # `market.fill` executes an `approve` tx before the `fillSlot` tx, so that's # two PoA blocks per `fillSlot` call (6 blocks for 3 calls). We don't need @@ -473,15 +479,15 @@ ethersuite "On-Chain Market": let events = await market.queryPastSlotFilledEvents(blocksAgo = 5) check events == @[ - SlotFilled(requestId: request.id, slotIndex: 0.u256), - SlotFilled(requestId: request.id, slotIndex: 1.u256), - SlotFilled(requestId: request.id, slotIndex: 2.u256), + SlotFilled(requestId: request.id, slotIndex: 0), + SlotFilled(requestId: request.id, slotIndex: 1), + SlotFilled(requestId: request.id, slotIndex: 2), ] test "can query past SlotFilled events since given timestamp": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) # The SlotFilled event will be included in the same block as # the fillSlot transaction. 
If we want to ignore the SlotFilled event @@ -492,10 +498,10 @@ ethersuite "On-Chain Market": let (_, fromTime) = await ethProvider.blockNumberAndTimestamp(BlockTag.latest) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 2.uint64, proof, request.ask.collateralPerSlot) let events = await market.queryPastSlotFilledEvents( fromTime = fromTime.truncate(SecondsSince1970) @@ -503,19 +509,19 @@ ethersuite "On-Chain Market": check events == @[ - SlotFilled(requestId: request.id, slotIndex: 1.u256), - SlotFilled(requestId: request.id, slotIndex: 2.u256), + SlotFilled(requestId: request.id, slotIndex: 1), + SlotFilled(requestId: request.id, slotIndex: 2), ] test "queryPastSlotFilledEvents returns empty sequence of events when " & "no SlotFilled events have occurred since given timestamp": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 
2.uint64, proof, request.ask.collateralPerSlot) await ethProvider.advanceTime(10.u256) @@ -540,21 +546,21 @@ ethersuite "On-Chain Market": let address = await host.getAddress() switchAccount(host) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) let filledAt = (await ethProvider.currentTime()) - 1.u256 for slotIndex in 1 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) let requestEnd = await market.getRequestEnd(request.id) await ethProvider.advanceTimeTo(requestEnd.u256 + 1) let startBalance = await token.balanceOf(address) - await market.freeSlot(request.slotId(0.u256)) + await market.freeSlot(request.slotId(0.uint64)) let endBalance = await token.balanceOf(address) let expectedPayout = request.expectedPayout(filledAt, requestEnd.u256) @@ -567,14 +573,14 @@ ethersuite "On-Chain Market": await market.requestStorage(request) switchAccount(host) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) let filledAt = (await ethProvider.currentTime()) - 1.u256 for slotIndex in 1 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) let requestEnd = await 
market.getRequestEnd(request.id) @@ -583,7 +589,7 @@ ethersuite "On-Chain Market": let startBalanceHost = await token.balanceOf(hostAddress) let startBalanceReward = await token.balanceOf(hostRewardRecipient) - await market.freeSlot(request.slotId(0.u256)) + await market.freeSlot(request.slotId(0.uint64)) let endBalanceHost = await token.balanceOf(hostAddress) let endBalanceReward = await token.balanceOf(hostRewardRecipient) @@ -591,3 +597,13 @@ ethersuite "On-Chain Market": let expectedPayout = request.expectedPayout(filledAt, requestEnd.u256) check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot) check endBalanceReward == (startBalanceReward + expectedPayout) + + test "the request is added in cache after the fist access": + await market.requestStorage(request) + + check market.requestCache.contains($request.id) == false + discard await market.getRequest(request.id) + + check market.requestCache.contains($request.id) == true + let cacheValue = market.requestCache[$request.id] + check cacheValue == request diff --git a/tests/examples.nim b/tests/examples.nim index c96fefd6..9b88b4a5 100644 --- a/tests/examples.nim +++ b/tests/examples.nim @@ -49,30 +49,30 @@ proc example*(_: type StorageRequest): StorageRequest = client: Address.example, ask: StorageAsk( slots: 4, - slotSize: (1 * 1024 * 1024 * 1024).u256, # 1 Gigabyte - duration: (10 * 60 * 60).u256, # 10 hours + slotSize: (1 * 1024 * 1024 * 1024).uint64, # 1 Gigabyte + duration: (10 * 60 * 60).uint64, # 10 hours collateralPerByte: 1.u256, proofProbability: 4.u256, # require a proof roughly once every 4 periods pricePerBytePerSecond: 1.u256, maxSlotLoss: 2, # 2 slots can be freed without data considered to be lost ), content: StorageContent( - cid: "zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob", + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet, merkleRoot: array[32, byte].example, ), - expiry: (60 * 60).u256, # 1 hour , + expiry: (60 * 60).uint64, # 1 hour , 
nonce: Nonce.example, ) proc example*(_: type Slot): Slot = let request = StorageRequest.example - let slotIndex = rand(request.ask.slots.int).u256 + let slotIndex = rand(request.ask.slots.int).uint64 Slot(request: request, slotIndex: slotIndex) proc example*(_: type SlotQueueItem): SlotQueueItem = let request = StorageRequest.example let slot = Slot.example - SlotQueueItem.init(request, slot.slotIndex.truncate(uint16)) + SlotQueueItem.init(request, slot.slotIndex.uint16) proc example(_: type G1Point): G1Point = G1Point(x: UInt256.example, y: UInt256.example) diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index d1191fb9..287f465f 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -86,6 +86,16 @@ proc downloadBytes*( success bytes +proc delete*(client: CodexClient, cid: Cid): ?!void = + let + url = client.baseurl & "/data/" & $cid + response = client.http.delete(url) + + if response.status != "204 No Content": + return failure(response.status) + + success() + proc list*(client: CodexClient): ?!RestContentList = let url = client.baseurl & "/data" let response = client.http.get(url) @@ -107,11 +117,11 @@ proc space*(client: CodexClient): ?!RestRepoStore = proc requestStorageRaw*( client: CodexClient, cid: Cid, - duration: UInt256, + duration: uint64, pricePerBytePerSecond: UInt256, proofProbability: UInt256, collateralPerByte: UInt256, - expiry: uint = 0, + expiry: uint64 = 0, nodes: uint = 3, tolerance: uint = 1, ): Response = @@ -136,10 +146,10 @@ proc requestStorageRaw*( proc requestStorage*( client: CodexClient, cid: Cid, - duration: UInt256, + duration: uint64, pricePerBytePerSecond: UInt256, proofProbability: UInt256, - expiry: uint, + expiry: uint64, collateralPerByte: UInt256, nodes: uint = 3, tolerance: uint = 1, @@ -177,7 +187,8 @@ proc getSlots*(client: CodexClient): ?!seq[Slot] = proc postAvailability*( client: CodexClient, - totalSize, duration, minPricePerBytePerSecond, 
totalCollateral: UInt256, + totalSize, duration: uint64, + minPricePerBytePerSecond, totalCollateral: UInt256, ): ?!Availability = ## Post sales availability endpoint ## @@ -197,8 +208,8 @@ proc postAvailability*( proc patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, freeSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = - UInt256.none, + totalSize, freeSize, duration: ?uint64 = uint64.none, + minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, ): Response = ## Updates availability ## @@ -227,8 +238,8 @@ proc patchAvailabilityRaw*( proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = - UInt256.none, + totalSize, duration: ?uint64 = uint64.none, + minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, ): void = let response = client.patchAvailabilityRaw( availabilityId, @@ -284,3 +295,6 @@ proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), httpMethod = HttpGet, ) + +proc deleteRaw*(client: CodexClient, cid: string): Response = + return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) diff --git a/tests/integration/codexconfig.nim b/tests/integration/codexconfig.nim index 41d7109c..138ae274 100644 --- a/tests/integration/codexconfig.nim +++ b/tests/integration/codexconfig.nim @@ -200,6 +200,54 @@ proc withLogLevel*( config.addCliOption("--log-level", $level) return startConfig +proc withBlockTtl*( + self: CodexConfig, ttl: int +): CodexConfig {.raises: [CodexConfigError].} = + var config = self + config.addCliOption("--block-ttl", $ttl) + return config + +proc withBlockTtl*( + self: CodexConfigs, idx: int, ttl: int +): CodexConfigs {.raises: [CodexConfigError].} = + self.checkBounds idx + + var startConfig = self + 
startConfig.configs[idx].addCliOption("--block-ttl", $ttl) + return startConfig + +proc withBlockTtl*( + self: CodexConfigs, ttl: int +): CodexConfigs {.raises: [CodexConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--block-ttl", $ttl) + return startConfig + +proc withBlockMaintenanceInterval*( + self: CodexConfig, interval: int +): CodexConfig {.raises: [CodexConfigError].} = + var config = self + config.addCliOption("--block-mi", $interval) + return config + +proc withBlockMaintenanceInterval*( + self: CodexConfigs, idx: int, interval: int +): CodexConfigs {.raises: [CodexConfigError].} = + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--block-mi", $interval) + return startConfig + +proc withBlockMaintenanceInterval*( + self: CodexConfigs, interval: int +): CodexConfigs {.raises: [CodexConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--block-mi", $interval) + return startConfig + proc withSimulateProofFailures*( self: CodexConfigs, idx: int, failEveryNProofs: int ): CodexConfigs {.raises: [CodexConfigError].} = diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index 68283ad1..d7502bf4 100644 --- a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -20,14 +20,14 @@ template marketplacesuite*(name: string, body: untyped) = var token {.inject, used.}: Erc20Token proc getCurrentPeriod(): Future[Period] {.async.} = - return periodicity.periodOf(await ethProvider.currentTime()) + return periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) proc advanceToNextPeriod() {.async.} = - let periodicity = Periodicity(seconds: period.u256) - let currentTime = await ethProvider.currentTime() + let periodicity = Periodicity(seconds: period) + let currentTime = (await ethProvider.currentTime()).truncate(uint64) let 
currentPeriod = periodicity.periodOf(currentTime) let endOfPeriod = periodicity.periodEnd(currentPeriod) - await ethProvider.advanceTimeTo(endOfPeriod + 1) + await ethProvider.advanceTimeTo(endOfPeriod.u256 + 1) template eventuallyP(condition: untyped, finalPeriod: Period): bool = proc eventuallyP(): Future[bool] {.async.} = @@ -56,19 +56,19 @@ template marketplacesuite*(name: string, body: untyped) = return nodes.u256 * slotSize(blocks, nodes, tolerance) proc createAvailabilities( - datasetSize: UInt256, + datasetSize: uint64, duration: uint64, collateralPerByte: UInt256, minPricePerBytePerSecond: UInt256, ) = - let totalCollateral = datasetSize * collateralPerByte + let totalCollateral = datasetSize.u256 * collateralPerByte # post availability to each provider for i in 0 ..< providers().len: let provider = providers()[i].client discard provider.postAvailability( totalSize = datasetSize, - duration = duration.u256, + duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) @@ -76,7 +76,7 @@ template marketplacesuite*(name: string, body: untyped) = proc requestStorage( client: CodexClient, cid: Cid, - proofProbability = 1, + proofProbability = 1.u256, duration: uint64 = 12.periods, pricePerBytePerSecond = 1.u256, collateralPerByte = 1.u256, @@ -86,9 +86,9 @@ template marketplacesuite*(name: string, body: untyped) = ): Future[PurchaseId] {.async.} = let id = client.requestStorage( cid, - expiry = expiry.uint, - duration = duration.u256, - proofProbability = proofProbability.u256, + expiry = expiry, + duration = duration, + proofProbability = proofProbability, collateralPerByte = collateralPerByte, pricePerBytePerSecond = pricePerBytePerSecond, nodes = nodes.uint, @@ -102,7 +102,7 @@ template marketplacesuite*(name: string, body: untyped) = let tokenAddress = await marketplace.token() token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) let config = await marketplace.configuration() - period = 
config.proofs.period.truncate(uint64) - periodicity = Periodicity(seconds: period.u256) + period = config.proofs.period + periodicity = Periodicity(seconds: period) body diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index ae9a38ab..bade6899 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -22,6 +22,7 @@ export hardhatprocess export codexprocess export hardhatconfig export codexconfig +export nodeconfigs type RunningNode* = ref object diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index e3fad75c..7e742c2a 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -1,89 +1,50 @@ -import std/os -import std/httpclient -import std/strutils -from std/net import TimeoutError +import ../examples +import ./multinodes -import pkg/chronos -import ../ethertest -import ./codexprocess -import ./nodeprocess - -ethersuite "Node block expiration tests": - var node: CodexProcess - var baseurl: string - - let dataDir = getTempDir() / "Codex1" - let content = "test file content" +multinodesuite "Node block expiration tests": + var content: seq[byte] setup: - baseurl = "http://localhost:8080/api/codex/v1" + content = await RandomChunker.example(blocks = 8) - teardown: - await node.stop() + test "node retains not-expired file", + NodeConfigs( + clients: CodexConfigs + .init(nodes = 1) + .withBlockTtl(0, 10) + .withBlockMaintenanceInterval(0, 1).some, + providers: CodexConfigs.none, + ): + let client = clients()[0] + let clientApi = client.client - dataDir.removeDir() - - proc startTestNode(blockTtlSeconds: int) {.async.} = - node = await CodexProcess.startNode( - @[ - "--api-port=8080", - "--data-dir=" & dataDir, - "--nat=none", - "--listen-addrs=/ip4/127.0.0.1/tcp/0", - "--disc-port=8090", - "--block-ttl=" & $blockTtlSeconds, - "--block-mi=1", - "--block-mn=10", - ], - false, - "cli-test-node", - ) - await 
node.waitUntilStarted() - - proc uploadTestFile(): string = - let client = newHttpClient() - let uploadUrl = baseurl & "/data" - let uploadResponse = client.post(uploadUrl, content) - check uploadResponse.status == "200 OK" - client.close() - uploadResponse.body - - proc downloadTestFile(contentId: string, local = false): Response = - let client = newHttpClient(timeout = 3000) - let downloadUrl = - baseurl & "/data/" & contentId & (if local: "" else: "/network/stream") - - let content = client.get(downloadUrl) - client.close() - content - - proc hasFile(contentId: string): bool = - let client = newHttpClient(timeout = 3000) - let dataLocalUrl = baseurl & "/data/" & contentId - let content = client.get(dataLocalUrl) - client.close() - content.code == Http200 - - test "node retains not-expired file": - await startTestNode(blockTtlSeconds = 10) - - let contentId = uploadTestFile() + let contentId = clientApi.upload(content).get await sleepAsync(2.seconds) - let response = downloadTestFile(contentId, local = true) + let download = clientApi.download(contentId, local = true) + check: - hasFile(contentId) - response.status == "200 OK" - response.body == content + download.isOk + download.get == string.fromBytes(content) - test "node deletes expired file": - await startTestNode(blockTtlSeconds = 1) + test "node deletes expired file", + NodeConfigs( + clients: CodexConfigs + .init(nodes = 1) + .withBlockTtl(0, 1) + .withBlockMaintenanceInterval(0, 1).some, + providers: CodexConfigs.none, + ): + let client = clients()[0] + let clientApi = client.client - let contentId = uploadTestFile() + let contentId = clientApi.upload(content).get await sleepAsync(3.seconds) + let download = clientApi.download(contentId, local = true) + check: - not hasFile(contentId) - downloadTestFile(contentId, local = true).code == Http404 + download.isFailure + download.error.msg == "404 Not Found" diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index 
e7604de7..29a3bc6f 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -50,7 +50,7 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": check eventually(requestId.isSome, timeout = expiry.int * 1000) let request = await marketplace.getRequest(requestId.get) - let cidFromRequest = Cid.init(request.content.cid).get() + let cidFromRequest = request.content.cid let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) check downloaded.isOk check downloaded.get.toHex == data.toHex diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 7813485b..727f3fad 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -34,28 +34,28 @@ marketplacesuite "Marketplace": await ethProvider.advanceTime(1.u256) test "nodes negotiate contracts on the marketplace", marketplaceConfig: - let size = 0xFFFFFF.u256 + let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) # host makes storage available let availability = host.postAvailability( totalSize = size, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size * minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, ).get # client requests storage let cid = client.upload(data).get - let id = client.requestStorage( + let id = await client.requestStorage( cid, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = 10 * 60.uint64, collateralPerByte = collateralPerByte, nodes = ecNodes, tolerance = ecTolerance, - ).get + ) check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get @@ -71,34 +71,34 @@ marketplacesuite "Marketplace": test "node slots gets paid out and rest of tokens are 
returned to client", marketplaceConfig: - let size = 0xFFFFFF.u256 + let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner()) let tokenAddress = await marketplace.token() let token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) - let duration = 20 * 60.u256 + let duration = 20 * 60.uint64 # host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) discard host.postAvailability( totalSize = size, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size * minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, ).get # client requests storage let cid = client.upload(data).get - let id = client.requestStorage( + let id = await client.requestStorage( cid, duration = duration, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = 10 * 60.uint64, collateralPerByte = collateralPerByte, nodes = ecNodes, tolerance = ecTolerance, - ).get + ) check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get @@ -109,13 +109,13 @@ marketplacesuite "Marketplace": # Proving mechanism uses blockchain clock to do proving/collect/cleanup round # hence we must use `advanceTime` over `sleepAsync` as Hardhat does mine new blocks # only with new transaction - await ethProvider.advanceTime(duration) + await ethProvider.advanceTime(duration.u256) # Checking that the hosting node received reward for at least the time between let slotSize = slotSize(blocks, ecNodes, ecTolerance) let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= - (duration - 5 * 60) * pricePerSlotPerSecond * ecNodes.u256 + (duration - 5 * 60).u256 * pricePerSlotPerSecond * 
ecNodes.u256 # Checking that client node receives some funds back that were not used for the host nodes check eventually( @@ -157,19 +157,19 @@ marketplacesuite "Marketplace payouts": # provider makes storage available let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) - let totalAvailabilitySize = datasetSize div 2 + let totalAvailabilitySize = (datasetSize div 2).truncate(uint64) discard providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation totalSize = totalAvailabilitySize, - duration = duration.u256, + duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = collateralPerByte * totalAvailabilitySize, + totalCollateral = collateralPerByte * totalAvailabilitySize.u256, ) let cid = clientApi.upload(data).get - var slotIdxFilled = none UInt256 + var slotIdxFilled = none uint64 proc onSlotFilled(eventResult: ?!SlotFilled) = assert not eventResult.isErr slotIdxFilled = some (!eventResult).slotIndex diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index a547890b..ab29ca4e 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -43,7 +43,10 @@ marketplacesuite "Hosts submit regular proofs": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -117,7 +120,10 @@ marketplacesuite "Simulate invalid proofs": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -128,7 
+134,7 @@ marketplacesuite "Simulate invalid proofs": duration = duration, nodes = ecNodes, tolerance = ecTolerance, - proofProbability = 1, + proofProbability = 1.u256, ) let requestId = client0.requestId(purchaseId).get @@ -177,7 +183,10 @@ marketplacesuite "Simulate invalid proofs": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -188,7 +197,7 @@ marketplacesuite "Simulate invalid proofs": duration = duration, nodes = ecNodes, tolerance = ecTolerance, - proofProbability = 1, + proofProbability = 1.u256, ) let requestId = client0.requestId(purchaseId).get diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4e08e7a8..259efcff 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -11,18 +11,18 @@ twonodessuite "Purchasing": let cid = client1.upload(data).get let id1 = client1.requestStorage( cid, - duration = 100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 10, + expiry = 10.uint64, collateralPerByte = 1.u256, ).get let id2 = client1.requestStorage( cid, - duration = 400.u256, + duration = 400.uint64, pricePerBytePerSecond = 2.u256, proofProbability = 6.u256, - expiry = 10, + expiry = 10.uint64, collateralPerByte = 2.u256, ).get check id1 != id2 @@ -37,20 +37,22 @@ twonodessuite "Purchasing": let cid = client1.upload(byteutils.toHex(data)).get let id = client1.requestStorage( cid, - duration = 100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 30, + expiry = 30.uint64, collateralPerByte = 1.u256, nodes = 3, tolerance = 1, ).get let request = client1.getPurchase(id).get.request.get - check request.ask.duration == 
100.u256 + + check request.content.cid.data.buffer.len > 0 + check request.ask.duration == 100.uint64 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == 30 + check request.expiry == 30.uint64 check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 @@ -76,10 +78,10 @@ twonodessuite "Purchasing": let cid = client1.upload(data).get let id = client1.requestStorage( cid, - duration = 10 * 60.u256, + duration = 10 * 60.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 5 * 60, + expiry = 5 * 60.uint64, collateralPerByte = 1.u256, nodes = 3.uint, tolerance = 1.uint, @@ -91,10 +93,10 @@ twonodessuite "Purchasing": check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) let request = client1.getPurchase(id).get.request.get - check request.ask.duration == (10 * 60).u256 + check request.ask.duration == (10 * 60).uint64 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == (5 * 60).u256 + check request.expiry == (5 * 60).uint64 check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 @@ -105,7 +107,7 @@ twonodessuite "Purchasing": let responseMissing = client1.requestStorageRaw( cid, - duration = 1.u256, + duration = 1.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, @@ -115,11 +117,11 @@ twonodessuite "Purchasing": let responseBefore = client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 10, + expiry = 10.uint64, ) check responseBefore.status == "400 Bad Request" check "Expiry needs value bigger then zero and smaller then the request's duration" in diff --git a/tests/integration/testrestapi.nim 
b/tests/integration/testrestapi.nim index 8cbe9817..a748c98e 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,10 +1,13 @@ import std/httpclient import std/sequtils import std/strformat -from pkg/libp2p import `==` +from pkg/libp2p import `==`, `$`, Cid import pkg/codex/units +import pkg/codex/manifest import ./twonodes import ../examples +import ../codex/examples +import ../codex/slots/helpers import json twonodessuite "REST API": @@ -22,12 +25,12 @@ twonodessuite "REST API": test "node shows used and available space", twoNodesConfig: discard client1.upload("some file contents").get - let totalSize = 12.u256 + let totalSize = 12.uint64 let minPricePerBytePerSecond = 1.u256 - let totalCollateral = totalSize * minPricePerBytePerSecond + let totalCollateral = totalSize.u256 * minPricePerBytePerSecond discard client1.postAvailability( totalSize = totalSize, - duration = 2.u256, + duration = 2.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ).get @@ -35,7 +38,7 @@ twonodessuite "REST API": check: space.totalBlocks == 2 space.quotaMaxBytes == 8589934592.NBytes - space.quotaUsedBytes == 65598.NBytes + space.quotaUsedBytes == 65592.NBytes space.quotaReservedBytes == 12.NBytes test "node lists local files", twoNodesConfig: @@ -53,11 +56,11 @@ twonodessuite "REST API": let cid = client1.upload("some file contents").get let response = client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 9, + expiry = 9.uint64, ) check: @@ -71,11 +74,11 @@ twonodessuite "REST API": let cid = client1.upload(data).get let response = client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 9, + expiry = 9.uint64, ) check: @@ -84,10 +87,10 @@ twonodessuite "REST API": 
test "request storage fails if tolerance is zero", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 let nodes = 3 let tolerance = 0 @@ -100,13 +103,33 @@ twonodessuite "REST API": check responseBefore.status == "400 Bad Request" check responseBefore.body == "Tolerance needs to be bigger then zero" + test "request storage fails if duration exceeds limit", twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) + let cid = client1.upload(data).get + let duration = (31 * 24 * 60 * 60).uint64 + # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 2 + let pricePerBytePerSecond = 1.u256 + + var responseBefore = client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == "400 Bad Request" + check "Duration exceeds limit of" in responseBefore.body + test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] @@ -126,10 +149,10 @@ twonodessuite "REST API": twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 
3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 let ecParams = @[(0, 1), (1, 2), (2, 3)] @@ -153,10 +176,10 @@ twonodessuite "REST API": fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig: let data = await RandomChunker.example(blocks = minBlocks) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 var responseBefore = client1.requestStorageRaw( @@ -229,8 +252,6 @@ twonodessuite "REST API": check manifest["filename"].getStr() == "example.txt" check manifest.hasKey("mimetype") == true check manifest["mimetype"].getStr() == "text/plain" - check manifest.hasKey("uploadedAt") == true - check manifest["uploadedAt"].getInt() > 0 test "node set the headers when for download", twoNodesConfig: let headers = newHttpHeaders( @@ -263,3 +284,24 @@ twonodessuite "REST API": check localResponse.headers.hasKey("Content-Disposition") == true check localResponse.headers["Content-Disposition"] == "attachment; filename=\"example.txt\"" + + test "should delete a dataset when requested", twoNodesConfig: + let cid = client1.upload("some file contents").get + + var response = client1.downloadRaw($cid, local = true) + check response.body == "some file contents" + + client1.delete(cid).get + + response = client1.downloadRaw($cid, local = true) + check response.status == "404 Not Found" + + test "should return 200 when attempting delete of non-existing block", twoNodesConfig: + let response = client1.deleteRaw($(Cid.example())) + check response.status == "204 No Content" + + test "should return 200 when attempting delete of non-existing dataset", + twoNodesConfig: + let cid = Manifest.example().makeManifestBlock().get.cid + let response = client1.deleteRaw($cid) + check response.status == "204 No Content" diff --git a/tests/integration/testsales.nim 
b/tests/integration/testsales.nim index a77e5649..6c5c30d5 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -31,14 +31,14 @@ multinodesuite "Sales": test "node handles new storage availability", salesConfig: let availability1 = host.postAvailability( - totalSize = 1.u256, - duration = 2.u256, + totalSize = 1.uint64, + duration = 2.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 4.u256, ).get let availability2 = host.postAvailability( - totalSize = 4.u256, - duration = 5.u256, + totalSize = 4.uint64, + duration = 5.uint64, minPricePerBytePerSecond = 6.u256, totalCollateral = 7.u256, ).get @@ -46,8 +46,8 @@ multinodesuite "Sales": test "node lists storage that is for sale", salesConfig: let availability = host.postAvailability( - totalSize = 1.u256, - duration = 2.u256, + totalSize = 1.uint64, + duration = 2.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 4.u256, ).get @@ -56,7 +56,7 @@ multinodesuite "Sales": test "updating non-existing availability", salesConfig: let nonExistingResponse = host.patchAvailabilityRaw( AvailabilityId.example, - duration = 100.u256.some, + duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) @@ -64,60 +64,60 @@ multinodesuite "Sales": test "updating availability", salesConfig: let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, + totalSize = 140000.uint64, + duration = 200.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 300.u256, ).get host.patchAvailability( availability.id, - duration = 100.u256.some, + duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get - check updatedAvailability.duration == 100 + check updatedAvailability.duration == 100.uint64 check updatedAvailability.minPricePerBytePerSecond == 2 check 
updatedAvailability.totalCollateral == 200 - check updatedAvailability.totalSize == 140000 - check updatedAvailability.freeSize == 140000 + check updatedAvailability.totalSize == 140000.uint64 + check updatedAvailability.freeSize == 140000.uint64 test "updating availability - freeSize is not allowed to be changed", salesConfig: let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, + totalSize = 140000.uint64, + duration = 200.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 300.u256, ).get let freeSizeResponse = - host.patchAvailabilityRaw(availability.id, freeSize = 110000.u256.some) + host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) check freeSizeResponse.status == "400 Bad Request" check "not allowed" in freeSizeResponse.body test "updating availability - updating totalSize", salesConfig: let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, + totalSize = 140000.uint64, + duration = 200.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 300.u256, ).get - host.patchAvailability(availability.id, totalSize = 100000.u256.some) + host.patchAvailability(availability.id, totalSize = 100000.uint64.some) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 test "updating availability - updating totalSize does not allow bellow utilized", salesConfig: - let originalSize = 0xFFFFFF.u256 + let originalSize = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = 8) let minPricePerBytePerSecond = 3.u256 let collateralPerByte = 1.u256 - let totalCollateral = originalSize * collateralPerByte + let totalCollateral = originalSize.u256 * collateralPerByte let availability = host.postAvailability( totalSize = originalSize, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, 
totalCollateral = totalCollateral, ).get @@ -126,10 +126,10 @@ multinodesuite "Sales": let cid = client.upload(data).get let id = client.requestStorage( cid, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = (10 * 60).uint64, collateralPerByte = collateralPerByte, nodes = 3, tolerance = 1, @@ -140,9 +140,8 @@ multinodesuite "Sales": check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = host.patchAvailabilityRaw( - availability.id, totalSize = (utilizedSize - 1.u256).some - ) + let totalSizeResponse = + host.patchAvailabilityRaw(availability.id, totalSize = (utilizedSize - 1).some) check totalSizeResponse.status == "400 Bad Request" check "totalSize must be larger then current totalSize" in totalSizeResponse.body diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index 8b7fbc5b..7f4bc851 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -37,7 +37,7 @@ marketplacesuite "Validation": const blocks = 8 const ecNodes = 3 const ecTolerance = 1 - const proofProbability = 1 + const proofProbability = 1.u256 const collateralPerByte = 1.u256 const minPricePerBytePerSecond = 1.u256 @@ -100,7 +100,10 @@ marketplacesuite "Validation": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -167,7 +170,10 @@ marketplacesuite "Validation": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + 
datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get diff --git a/tests/integration/twonodes.nim b/tests/integration/twonodes.nim index 5666690e..eeceb20d 100644 --- a/tests/integration/twonodes.nim +++ b/tests/integration/twonodes.nim @@ -1,4 +1,3 @@ -import std/os import std/macros import pkg/questionable import ./multinodes diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index e74d3397..32a6c13d 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit e74d3397a133eaf1eb95d9ce59f56747a7c8c30b +Subproject commit 32a6c13def1c1505765e9e0dc465117fba98c161 diff --git a/vendor/nim-ethers b/vendor/nim-ethers index 1cfccb96..d2b11a86 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit 1cfccb9695fa47860bf7ef3d75da9019096a3933 +Subproject commit d2b11a865796a55296027f8ffba68398035ad435 diff --git a/vendor/nim-leopard b/vendor/nim-leopard index 3e09d811..7506b90f 160000 --- a/vendor/nim-leopard +++ b/vendor/nim-leopard @@ -1 +1 @@ -Subproject commit 3e09d8113f874f3584c3fe93818541b2ff9fb9c3 +Subproject commit 7506b90f9c650c02b96bf525d4fd1bd4942a495f