diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index dbe18e64..0d27077a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,7 +11,6 @@ env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned
-

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
   cancel-in-progress: true
@@ -23,23 +22,23 @@ jobs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
-      - name: Compute matrix
-        id: matrix
-        uses: fabiocaccamo/create-matrix-action@v4
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
+      - name: Compute matrix
+        id: matrix
+        uses: fabiocaccamo/create-matrix-action@v4
+        with:
+          matrix: |
+            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {amd64}, builder {macos-13}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}

   build:
     needs: matrix
diff --git a/build.nims b/build.nims
index 3d1a3cac..a9a0e553 100644
--- a/build.nims
+++ b/build.nims
@@ -41,6 +41,9 @@ task testContracts, "Build & run Codex Contract tests":
 task testIntegration, "Run integration tests":
   buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
   test "testIntegration"
+  # use params to enable logging from the integration test executable
+  # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
+  #   "-d:chronicles_enabled_topics:integration:TRACE"

 task build, "build codex binary":
   codexTask()
diff --git a/codex/contracts.nim b/codex/contracts.nim
index ecf298f4..512571a8 100644
--- a/codex/contracts.nim
+++ b/codex/contracts.nim
@@ -2,8 +2,10 @@ import contracts/requests
 import contracts/marketplace
 import contracts/market
 import contracts/interactions
+import contracts/provider

 export requests
 export marketplace
 export market
 export interactions
+export provider
diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim
index 049e38bb..a06a9486 100644
--- a/codex/contracts/market.nim
+++ b/codex/contracts/market.nim
@@ -1,6 +1,4 @@
-import std/sequtils
 import std/strutils
-import std/sugar
 import pkg/ethers
 import pkg/upraises
 import pkg/questionable
@@ -9,6 +7,7 @@ import ../logutils
 import ../market
 import ./marketplace
 import ./proofs
+import ./provider

 export market

@@ -467,18 +466,49 @@ method subscribeProofSubmission*(market: OnChainMarket,
 method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
   await subscription.eventSubscription.unsubscribe()

-method queryPastEvents*[T: MarketplaceEvent](
+method queryPastSlotFilledEvents*(
   market: OnChainMarket,
-  _: type T,
-  blocksAgo: int): Future[seq[T]] {.async.} =
+  fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =

   convertEthersError:
-    let contract = market.contract
-    let provider = contract.provider
+    return await market.contract.queryFilter(SlotFilled,
+                                             fromBlock,
+                                             BlockTag.latest)

-    let head = await provider.getBlockNumber()
-    let fromBlock = BlockTag.init(head - blocksAgo.abs.u256)
+method queryPastSlotFilledEvents*(
+  market: OnChainMarket,
+  blocksAgo: int): Future[seq[SlotFilled]] {.async.} =

-    return await contract.queryFilter(T,
-                                      fromBlock,
-                                      BlockTag.latest)
+  convertEthersError:
+    let fromBlock =
+      await market.contract.provider.pastBlockTag(blocksAgo)
+
+    return await market.queryPastSlotFilledEvents(fromBlock)
+
+method queryPastSlotFilledEvents*(
+  market: OnChainMarket,
+  fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
+
+  convertEthersError:
+    let fromBlock =
+      await market.contract.provider.blockNumberForEpoch(fromTime)
+    return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
+
+method queryPastStorageRequestedEvents*(
+  market: OnChainMarket,
+  fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
+
+  convertEthersError:
+    return await market.contract.queryFilter(StorageRequested,
+                                             fromBlock,
+                                             BlockTag.latest)
+
+method queryPastStorageRequestedEvents*(
+  market: OnChainMarket,
+  blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
+
+  convertEthersError:
+    let fromBlock =
+      await market.contract.provider.pastBlockTag(blocksAgo)
+
+    return await market.queryPastStorageRequestedEvents(fromBlock)
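For orientation, the hunk above replaces the single generic queryPastEvents*[T] with typed overloads that accept a BlockTag, a number of blocks to look back, or a UNIX timestamp. The following is only a hedged usage sketch of that new surface, not part of the diff: it assumes an already constructed OnChainMarket and a Clock, the import paths are indicative, and the proc name and literal values are illustrative.

import pkg/chronos
import pkg/ethers
import pkg/codex/clock
import pkg/codex/contracts   # assumed to re-export OnChainMarket and the provider helpers

# Sketch only: query recent marketplace events three different ways.
proc dumpRecentEvents(market: OnChainMarket, clock: Clock) {.async.} =
  # look back a fixed number of blocks
  let recentFills = await market.queryPastSlotFilledEvents(blocksAgo = 256)
  # look back by wall-clock time; resolved to a block via blockNumberForEpoch
  let dayAgo: SecondsSince1970 = clock.now() - 1.days.secs
  let lastDayFills = await market.queryPastSlotFilledEvents(fromTime = dayAgo)
  # or pass a BlockTag directly
  let requests = await market.queryPastStorageRequestedEvents(BlockTag.earliest)
  echo "fills: ", recentFills.len + lastDayFills.len, ", requests: ", requests.len

The named arguments (blocksAgo, fromTime) are what disambiguate the overloads when a bare integer is passed.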
diff --git a/codex/contracts/provider.nim b/codex/contracts/provider.nim
new file mode 100644
index 00000000..62098fb5
--- /dev/null
+++ b/codex/contracts/provider.nim
@@ -0,0 +1,126 @@
+import pkg/ethers/provider
+import pkg/chronos
+import pkg/questionable
+
+import ../logutils
+
+from ../clock import SecondsSince1970
+
+logScope:
+  topics = "marketplace onchain provider"
+
+proc raiseProviderError(message: string) {.raises: [ProviderError].} =
+  raise newException(ProviderError, message)
+
+proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
+    Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
+  without latestBlock =? await provider.getBlock(blockTag):
+    raiseProviderError("Could not get latest block")
+
+  without latestBlockNumber =? latestBlock.number:
+    raiseProviderError("Could not get latest block number")
+
+  return (latestBlockNumber, latestBlock.timestamp)
+
+proc binarySearchFindClosestBlock(
+    provider: Provider,
+    epochTime: int,
+    low: UInt256,
+    high: UInt256): Future[UInt256] {.async: (raises: [ProviderError]).} =
+  let (_, lowTimestamp) =
+    await provider.blockNumberAndTimestamp(BlockTag.init(low))
+  let (_, highTimestamp) =
+    await provider.blockNumberAndTimestamp(BlockTag.init(high))
+  if abs(lowTimestamp.truncate(int) - epochTime) <
+      abs(highTimestamp.truncate(int) - epochTime):
+    return low
+  else:
+    return high
+
+proc binarySearchBlockNumberForEpoch(
+    provider: Provider,
+    epochTime: UInt256,
+    latestBlockNumber: UInt256,
+    earliestBlockNumber: UInt256): Future[UInt256]
+    {.async: (raises: [ProviderError]).} =
+  var low = earliestBlockNumber
+  var high = latestBlockNumber
+
+  while low <= high:
+    if low == 0 and high == 0:
+      return low
+    let mid = (low + high) div 2
+    let (midBlockNumber, midBlockTimestamp) =
+      await provider.blockNumberAndTimestamp(BlockTag.init(mid))
+
+    if midBlockTimestamp < epochTime:
+      low = mid + 1
+    elif midBlockTimestamp > epochTime:
+      high = mid - 1
+    else:
+      return midBlockNumber
+  # NOTE that, because of how the binary search is implemented, when it
+  # finishes `low` is always greater than `high` - this is why we pass `high`
+  # where intuitively we would pass `low`:
+  await provider.binarySearchFindClosestBlock(
+    epochTime.truncate(int), low=high, high=low)
+
+proc blockNumberForEpoch*(
+    provider: Provider,
+    epochTime: SecondsSince1970): Future[UInt256]
+    {.async: (raises: [ProviderError]).} =
+  let epochTimeUInt256 = epochTime.u256
+  let (latestBlockNumber, latestBlockTimestamp) =
+    await provider.blockNumberAndTimestamp(BlockTag.latest)
+  let (earliestBlockNumber, earliestBlockTimestamp) =
+    await provider.blockNumberAndTimestamp(BlockTag.earliest)
+
+  # Initially we used the average block time to predict the number of blocks
+  # we need to look back in order to find the block number corresponding to
+  # the given epoch time. That estimation can be highly inaccurate if the
+  # block time changed in the past or is fluctuating, so we only used it to
+  # find out whether the available history is long enough for an effective
+  # search. It turns out we do not have to do that. There is an easier way.
+  #
+  # First we check if the given epoch time equals the timestamp of either
+  # the earliest or the latest block. If it does, we just return the
+  # block number of that block.
+  #
+  # Otherwise, if the earliest available block is not the genesis block,
+  # we check the timestamp of that earliest block, and if it is greater
+  # than the epoch time, we issue a warning and return that earliest block
+  # number.
+  # In all other cases - that is, when the earliest block is not the genesis
+  # block but its timestamp is not greater than the requested epoch time, or
+  # when the earliest available block is the genesis block (which means we
+  # have the whole history available) - we proceed with the binary search.
+  #
+  # An additional benefit of this method is that we do not have to rely
+  # on the average block time, which not only makes the whole thing
+  # more reliable, but also easier to test.

+  # Are we lucky today?
+  if earliestBlockTimestamp == epochTimeUInt256:
+    return earliestBlockNumber
+  if latestBlockTimestamp == epochTimeUInt256:
+    return latestBlockNumber
+
+  if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
+    let availableHistoryInDays =
+      (latestBlockTimestamp - earliestBlockTimestamp) div
+        1.days.secs.u256
+    warn "Short block history detected.", earliestBlockTimestamp =
+      earliestBlockTimestamp, days = availableHistoryInDays
+    return earliestBlockNumber
+
+  return await provider.binarySearchBlockNumberForEpoch(
+    epochTimeUInt256, latestBlockNumber, earliestBlockNumber)
+
+proc pastBlockTag*(provider: Provider,
+                   blocksAgo: int):
+                  Future[BlockTag] {.async: (raises: [ProviderError]).} =
+  let head = await provider.getBlockNumber()
+  return BlockTag.init(head - blocksAgo.abs.u256)
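blockNumberForEpoch above resolves a UNIX timestamp to a block number: it first checks the earliest and latest blocks for an exact or out-of-range match and otherwise binary-searches the timestamps in between. Below is a hedged sketch of how a caller might turn a look-back window into a BlockTag with it - not part of the diff; the proc name and the 30-day window are illustrative (mirroring MaxStorageRequestDuration used later in codex/validation.nim), and the import paths are assumptions.

import pkg/chronos
import pkg/ethers
import pkg/codex/contracts   # assumed to re-export the provider helpers added above
from pkg/codex/clock import SecondsSince1970

# Sketch only, not part of the diff.
proc startingBlockForLookback(
    provider: Provider,
    now: SecondsSince1970): Future[BlockTag] {.async.} =
  # resolve "now minus 30 days" to a concrete block number...
  let blockNumber = await provider.blockNumberForEpoch(now - 30.days.secs)
  # ...and wrap it in a BlockTag that queryFilter and the queryPast*Events
  # overloads can consume
  return BlockTag.init(blockNumber)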
diff --git a/codex/market.nim b/codex/market.nim
index cb86e0d7..38df9669 100644
--- a/codex/market.nim
+++ b/codex/market.nim
@@ -246,8 +246,27 @@ method subscribeProofSubmission*(market: Market,
 method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
   raiseAssert("not implemented")

-method queryPastEvents*[T: MarketplaceEvent](
-  market: Market,
-  _: type T,
-  blocksAgo: int): Future[seq[T]] {.base, async.} =
+method queryPastSlotFilledEvents*(
+  market: Market,
+  fromBlock: BlockTag): Future[seq[SlotFilled]] {.base, async.} =
+  raiseAssert("not implemented")
+
+method queryPastSlotFilledEvents*(
+  market: Market,
+  blocksAgo: int): Future[seq[SlotFilled]] {.base, async.} =
+  raiseAssert("not implemented")
+
+method queryPastSlotFilledEvents*(
+  market: Market,
+  fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.base, async.} =
+  raiseAssert("not implemented")
+
+method queryPastStorageRequestedEvents*(
+  market: Market,
+  fromBlock: BlockTag): Future[seq[StorageRequested]] {.base, async.} =
+  raiseAssert("not implemented")
+
+method queryPastStorageRequestedEvents*(
+  market: Market,
+  blocksAgo: int): Future[seq[StorageRequested]] {.base, async.} =
   raiseAssert("not implemented")
diff --git a/codex/validation.nim b/codex/validation.nim
index d00f5772..cc663c5f 100644
--- a/codex/validation.nim
+++ b/codex/validation.nim
@@ -23,6 +23,9 @@ type
     proofTimeout: UInt256
     config: ValidationConfig

+const
+  MaxStorageRequestDuration = 30.days
+
 logScope:
   topics = "codex validator"

@@ -56,15 +59,15 @@ func maxSlotsConstraintRespected(validation: Validation): bool =
   validation.slots.len < validation.config.maxSlots

 func shouldValidateSlot(validation: Validation, slotId: SlotId): bool =
-  if (validationGroups =? validation.config.groups):
-    (groupIndexForSlotId(slotId, validationGroups) ==
-      validation.config.groupIndex) and
-      validation.maxSlotsConstraintRespected
-  else:
-    validation.maxSlotsConstraintRespected
+  without validationGroups =? validation.config.groups:
+    return true
+  groupIndexForSlotId(slotId, validationGroups) ==
+    validation.config.groupIndex

 proc subscribeSlotFilled(validation: Validation) {.async.} =
   proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
+    if not validation.maxSlotsConstraintRespected:
+      return
     let slotId = slotId(requestId, slotIndex)
     if validation.shouldValidateSlot(slotId):
       trace "Adding slot", slotId
@@ -78,7 +81,7 @@ proc removeSlotsThatHaveEnded(validation: Validation) {.async.} =
   for slotId in slots:
     let state = await validation.market.slotState(slotId)
     if state != SlotState.Filled:
-      trace "Removing slot", slotId
+      trace "Removing slot", slotId, slotState = state
       ended.incl(slotId)
   validation.slots.excl(ended)

@@ -119,14 +122,37 @@ proc run(validation: Validation) {.async.} =
   except CatchableError as e:
     error "Validation failed", msg = e.msg

+proc epochForDurationBackFromNow(validation: Validation,
+                                 duration: Duration): SecondsSince1970 =
+  return validation.clock.now - duration.secs
+
+proc restoreHistoricalState(validation: Validation) {.async.} =
+  trace "Restoring historical state..."
+  let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration)
+  let slotFilledEvents = await validation.market.queryPastSlotFilledEvents(
+    fromTime = startTimeEpoch)
+  for event in slotFilledEvents:
+    if not validation.maxSlotsConstraintRespected:
+      break
+    let slotId = slotId(event.requestId, event.slotIndex)
+    let slotState = await validation.market.slotState(slotId)
+    if slotState == SlotState.Filled and validation.shouldValidateSlot(slotId):
+      trace "Adding slot [historical]", slotId
+      validation.slots.incl(slotId)
+  trace "Historical state restored", numberOfSlots = validation.slots.len
+
 proc start*(validation: Validation) {.async.} =
+  trace "Starting validator", groups = validation.config.groups,
+    groupIndex = validation.config.groupIndex
   validation.periodicity = await validation.market.periodicity()
   validation.proofTimeout = await validation.market.proofTimeout()
   await validation.subscribeSlotFilled()
+  await validation.restoreHistoricalState()
   validation.running = validation.run()

 proc stop*(validation: Validation) {.async.} =
-  await validation.running.cancelAndWait()
+  if not isNil(validation.running):
+    await validation.running.cancelAndWait()
   while validation.subscriptions.len > 0:
     let subscription = validation.subscriptions.pop()
     await subscription.unsubscribe()
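restoreHistoricalState above is what makes a freshly started validator aware of slots that were filled before it came online: it replays SlotFilled events from the last MaxStorageRequestDuration (30 days), re-applies the group and maxSlots constraints, and only keeps slots that are still in the Filled state. The following is a hedged end-to-end sketch of that start-up path, written as if it sat next to the unit tests further down and using their mock helpers; it is not part of the diff, and the maxSlots value and the exact ValidationConfig.init parameter names are assumptions based on the test code in this diff.

import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import codex/validation
import codex/clock
import ./helpers/mockmarket
import ./helpers/mockclock

# Sketch only, not part of the diff.
proc exampleRestore() {.async.} =
  let clock = MockClock.new()
  let market = MockMarket.new(clock = Clock(clock).some)
  without config =? ValidationConfig.init(
      maxSlots = 100, groups = ValidationGroups.none, groupIndex = 0), error:
    raiseAssert "unexpected: " & error.msg
  let validation = Validation.new(clock, market, config)
  # start() subscribes to new SlotFilled events and then replays the past
  # 30 days of SlotFilled events via restoreHistoricalState()
  await validation.start()
  echo "tracking ", validation.slots.len, " slot(s) after restore"
  await validation.stop()

waitFor exampleRestore()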
diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim
index 07eeb856..7a5c94b8 100644
--- a/tests/codex/helpers/mockmarket.nim
+++ b/tests/codex/helpers/mockmarket.nim
@@ -8,11 +8,18 @@ import pkg/codex/market
 import pkg/codex/contracts/requests
 import pkg/codex/contracts/proofs
 import pkg/codex/contracts/config
+
+from pkg/ethers import BlockTag
+import codex/clock
+
 import ../examples

 export market
 export tables

+logScope:
+  topics = "mockMarket"
+
 type
   MockMarket* = ref object of Market
     periodicity: Periodicity
@@ -40,6 +47,7 @@ type
     config*: MarketplaceConfig
     canReserveSlot*: bool
     reserveSlotThrowError*: ?(ref MarketError)
+    clock: ?Clock
   Fulfillment* = object
     requestId*: RequestId
     proof*: Groth16Proof
@@ -49,6 +57,7 @@ type
     host*: Address
     slotIndex*: UInt256
     proof*: Groth16Proof
+    timestamp: ?SecondsSince1970
   Subscriptions = object
     onRequest: seq[RequestSubscription]
     onFulfillment: seq[FulfillmentSubscription]
@@ -94,7 +103,7 @@ proc hash*(address: Address): Hash =
 proc hash*(requestId: RequestId): Hash =
   hash(requestId.toArray)

-proc new*(_: type MockMarket): MockMarket =
+proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket =
   ## Create a new mocked Market instance
   ##
   let config = MarketplaceConfig(
@@ -111,7 +120,8 @@ proc new*(_: type MockMarket): MockMarket =
       downtimeProduct: 67.uint8
     )
   )
-  MockMarket(signer: Address.example, config: config, canReserveSlot: true)
+  MockMarket(signer: Address.example, config: config,
+             canReserveSlot: true, clock: clock)

 method getSigner*(market: MockMarket): Future[Address] {.async.} =
   return market.signer
@@ -248,7 +258,8 @@ proc fillSlot*(market: MockMarket,
     requestId: requestId,
     slotIndex: slotIndex,
     proof: proof,
-    host: host
+    host: host,
+    timestamp: market.clock.?now
   )
   market.filled.add(slot)
   market.slotState[slotId(slot.requestId, slot.slotIndex)] = SlotState.Filled
@@ -472,21 +483,51 @@ method subscribeProofSubmission*(mock: MockMarket,
   mock.subscriptions.onProofSubmitted.add(subscription)
   return subscription

-method queryPastEvents*[T: MarketplaceEvent](
-  market: MockMarket,
-  _: type T,
-  blocksAgo: int): Future[seq[T]] {.async.} =
+method queryPastStorageRequestedEvents*(
+  market: MockMarket,
+  fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
+  return market.requested.map(request =>
+    StorageRequested(requestId: request.id,
+                     ask: request.ask,
+                     expiry: request.expiry)
+  )

-  if T of StorageRequested:
-    return market.requested.map(request =>
-      StorageRequested(requestId: request.id,
-                       ask: request.ask,
-                       expiry: request.expiry)
-    )
-  elif T of SlotFilled:
-    return market.filled.map(slot =>
-      SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
-    )
+method queryPastStorageRequestedEvents*(
+  market: MockMarket,
+  blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
+  return market.requested.map(request =>
+    StorageRequested(requestId: request.id,
+                     ask: request.ask,
+                     expiry: request.expiry)
+  )
+
+method queryPastSlotFilledEvents*(
+  market: MockMarket,
+  fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =
+  return market.filled.map(slot =>
+    SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
+  )
+
+method queryPastSlotFilledEvents*(
+  market: MockMarket,
+  blocksAgo: int): Future[seq[SlotFilled]] {.async.} =
+  return market.filled.map(slot =>
+    SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
+  )
+
+method queryPastSlotFilledEvents*(
+  market: MockMarket,
+  fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
+  let filtered = market.filled.filter(
+    proc (slot: MockSlot): bool =
+      if timestamp =? slot.timestamp:
+        return timestamp >= fromTime
+      else:
+        true
+  )
+  return filtered.map(slot =>
+    SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
+  )

 method unsubscribe*(subscription: RequestSubscription) {.async.} =
   subscription.market.subscriptions.onRequest.keepItIf(it != subscription)
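With the clock injected above, MockMarket stamps every filled slot with the mock time, and the fromTime overload of queryPastSlotFilledEvents only returns fills whose timestamp is at or after the requested epoch (fills recorded without a clock are always returned). Below is a small hedged sketch of that behaviour in the style of the unit tests - not part of the diff; the timestamps are arbitrary, and Slot.example and MockClock's set helper are assumed to exist in the test helpers.

import pkg/chronos
import pkg/codex/clock
import ./helpers/mockmarket
import ./helpers/mockclock
import ./examples

# Sketch only, not part of the diff.
proc exampleTimestampFilter() {.async.} =
  let clock = MockClock.new()
  let market = MockMarket.new(clock = Clock(clock).some)
  let slot = Slot.example
  let proof = Groth16Proof.example
  let collateral = slot.request.ask.collateral

  clock.set(1000)   # first fill is stamped with t=1000
  await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
  clock.set(2000)   # second fill is stamped with t=2000
  await market.fillSlot(slot.request.id, slot.slotIndex + 1.u256, proof, collateral)

  # only the second fill happened at or after epoch 1500
  let recent = await market.queryPastSlotFilledEvents(fromTime = 1500)
  doAssert recent.len == 1

waitFor exampleTimestampFilter()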
diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim
index 7988269b..2cfe2f06 100644
--- a/tests/codex/testvalidation.nim
+++ b/tests/codex/testvalidation.nim
@@ -1,9 +1,10 @@
 import pkg/chronos
 import std/strformat
-import std/random
+import std/times

 import codex/validation
 import codex/periods
+import codex/clock

 import ../asynctest
 import ./helpers/mockmarket
@@ -11,6 +12,9 @@ import ./helpers/mockclock
 import ./examples
 import ./helpers

+logScope:
+  topics = "testValidation"
+
 asyncchecksuite "validation":
   let period = 10
   let timeout = 5
@@ -20,10 +24,10 @@ asyncchecksuite "validation":
   let proof = Groth16Proof.example
   let collateral = slot.request.ask.collateral

-  var validation: Validation
   var market: MockMarket
   var clock: MockClock
   var groupIndex: uint16
+  var validation: Validation

   proc initValidationConfig(maxSlots: MaxSlots,
                             validationGroups: ?ValidationGroups,
@@ -32,19 +36,27 @@ asyncchecksuite "validation":
       maxSlots, groups=validationGroups, groupIndex), error:
       raiseAssert fmt"Creating ValidationConfig failed! Error msg: {error.msg}"
     validationConfig
+
+  proc newValidation(clock: Clock,
+                     market: Market,
+                     maxSlots: MaxSlots,
+                     validationGroups: ?ValidationGroups,
+                     groupIndex: uint16 = 0): Validation =
+    let validationConfig = initValidationConfig(
+      maxSlots, validationGroups, groupIndex)
+    Validation.new(clock, market, validationConfig)

   setup:
     groupIndex = groupIndexForSlotId(slot.id, !validationGroups)
-    market = MockMarket.new()
     clock = MockClock.new()
-    let validationConfig = initValidationConfig(
-      maxSlots, validationGroups, groupIndex)
-    validation = Validation.new(clock, market, validationConfig)
+    market = MockMarket.new(clock = Clock(clock).some)
     market.config.proofs.period = period.u256
     market.config.proofs.timeout = timeout.u256
-    await validation.start()
+    validation = newValidation(
+      clock, market, maxSlots, validationGroups, groupIndex)

   teardown:
+    # calling stop on validation that did not start is harmless
     await validation.stop()

   proc advanceToNextPeriod =
@@ -79,6 +91,7 @@ asyncchecksuite "validation":
   test "initializing ValidationConfig fails when maxSlots is negative " &
        "(validationGroups set)":
     let maxSlots = -1
+    let groupIndex = 0'u16
     let validationConfig = ValidationConfig.init(
       maxSlots = maxSlots, groups = validationGroups, groupIndex)
     check validationConfig.isFailure == true
@@ -86,45 +99,41 @@ asyncchecksuite "validation":
     fmt"be greater than or equal to 0! (got: {maxSlots})"

   test "slot is not observed if it is not in the validation group":
-    let validationConfig = initValidationConfig(maxSlots, validationGroups,
-      (groupIndex + 1) mod uint16(!validationGroups))
-    let validation = Validation.new(clock, market, validationConfig)
+    validation = newValidation(clock, market, maxSlots, validationGroups,
+      (groupIndex + 1) mod uint16(!validationGroups))
     await validation.start()
     await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
-    await validation.stop()
     check validation.slots.len == 0

   test "when a slot is filled on chain, it is added to the list":
+    await validation.start()
     await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
     check validation.slots == @[slot.id]

   test "slot should be observed if maxSlots is set to 0":
-    let validationConfig = initValidationConfig(
-      maxSlots = 0, ValidationGroups.none)
-    let validation = Validation.new(clock, market, validationConfig)
+    validation = newValidation(clock, market, maxSlots = 0, ValidationGroups.none)
     await validation.start()
     await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
-    await validation.stop()
     check validation.slots == @[slot.id]

   test "slot should be observed if validation group is not set (and " &
       "maxSlots is not 0)":
-    let validationConfig = initValidationConfig(
-      maxSlots, ValidationGroups.none)
-    let validation = Validation.new(clock, market, validationConfig)
+    validation = newValidation(clock, market, maxSlots, ValidationGroups.none)
     await validation.start()
     await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
-    await validation.stop()
     check validation.slots == @[slot.id]

   for state in [SlotState.Finished, SlotState.Failed]:
     test fmt"when slot state changes to {state}, it is removed from the list":
+      validation = newValidation(clock, market, maxSlots, validationGroups)
+      await validation.start()
       await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
       market.slotState[slot.id] = state
       advanceToNextPeriod()
       check eventually validation.slots.len == 0

   test "when a proof is missed, it is marked as missing":
+    await validation.start()
     await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
     market.setCanProofBeMarkedAsMissing(slot.id, true)
     advanceToNextPeriod()
@@ -132,6 +141,7 @@ asyncchecksuite "validation":
     check market.markedAsMissingProofs.contains(slot.id)

   test "when a proof can not be marked as missing, it will not be marked":
+    await validation.start()
     await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
     market.setCanProofBeMarkedAsMissing(slot.id, false)
     advanceToNextPeriod()
@@ -139,13 +149,73 @@ asyncchecksuite "validation":
     check market.markedAsMissingProofs.len == 0

   test "it does not monitor more than the maximum number of slots":
-    let validationGroups = ValidationGroups.none
-    let validationConfig = initValidationConfig(
-      maxSlots, validationGroups)
-    let validation = Validation.new(clock, market, validationConfig)
+    validation = newValidation(clock, market, maxSlots, ValidationGroups.none)
     await validation.start()
+    for _ in 0..
 291
+  # 1728436104 => 291
+  # 1728436105 => 292
+  # 1728436106 => 292
+  # 1728436110 => 292
+  proc generateExpectations(
+      blocks: seq[(UInt256, UInt256)]): seq[Expectations] =
+    var expectations: seq[Expectations] = @[]
+    for i in 0..
//_.log
+        .withLogTopics("purchases", "onchain")
+        .some,
+
+    providers:
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log
+        # .withLogTopics("sales", "onchain")
+        .some,
+
+    validators:
+      CodexConfigs.init(nodes=2)
+        .withValidationGroups(groups = 2)
+        .withValidationGroupIndex(idx = 0, groupIndex = 0)
+        .withValidationGroupIndex(idx = 1, groupIndex = 1)
+        # .debug() # uncomment to enable console log output
+        .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log
+        .withLogTopics("validator") # each topic as a separate string argument
+        .some
+  ):
+    let client0 = clients()[0].client
+    let expiry = 5.periods
+    let duration = expiry + 10.periods
+
+    # let's mine a block to sync the block time with the current clock
+    discard await ethProvider.send("evm_mine")
+
+    var currentTime = await ethProvider.currentTime()
+    let requestEndTime = currentTime.truncate(uint64) + duration
+
+    let data = await RandomChunker.example(blocks=8)
+
+    # TODO: better value for data.len below. This TODO is also present in
+    # testproofs.nim - we may want to address it or remove the comment.
+    createAvailabilities(data.len * 2, duration)
+
+    let cid = client0.upload(data).get
+    let purchaseId = await client0.requestStorage(
+      cid,
+      expiry=expiry,
+      duration=duration,
+      nodes=nodes,
+      tolerance=tolerance,
+      proofProbability=proofProbability
+    )
+    let requestId = client0.requestId(purchaseId).get
+
+    debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId
+
+    if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"),
+                       timeout = (expiry + 60).int, step = 5):
+      debug "validation suite: timed out waiting for the purchase to start"
+      fail()
+      return
+
+    discard await ethProvider.send("evm_mine")
+    currentTime = await ethProvider.currentTime()
+    let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int
+
+    debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds
+
+    check await marketplace.waitForRequestToFail(
+      requestId,
+      timeout = secondsTillRequestEnd + 60,
+      step = 5
+    )
+
+  test "validator uses historical state to mark missing proofs", NodeConfigs(
+    # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
+    hardhat:
+      HardhatConfig.none,
+
+    clients:
+      CodexConfigs.init(nodes=1)
+        # .debug() # uncomment to enable console log output
+        .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log
+        .withLogTopics("purchases", "onchain")
+        .some,
+
+    providers:
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log
+        # .withLogTopics("sales", "onchain")
+        .some
+  ):
+    let client0 = clients()[0].client
+    let expiry = 5.periods
+    let duration = expiry + 10.periods
+
+    # let's mine a block to sync the block time with the current clock
+    discard await ethProvider.send("evm_mine")
+
+    var currentTime = await ethProvider.currentTime()
+    let requestEndTime = currentTime.truncate(uint64) + duration
+
+    let data = await RandomChunker.example(blocks=8)
+
+    # TODO: better value for data.len below. This TODO is also present in
+    # testproofs.nim - we may want to address it or remove the comment.
+    createAvailabilities(data.len * 2, duration)
+
+    let cid = client0.upload(data).get
+    let purchaseId = await client0.requestStorage(
+      cid,
+      expiry=expiry,
+      duration=duration,
+      nodes=nodes,
+      tolerance=tolerance,
+      proofProbability=proofProbability
+    )
+    let requestId = client0.requestId(purchaseId).get
+
+    debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId
+
+    if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"),
+                       timeout = (expiry + 60).int, step = 5):
+      debug "validation suite: timed out waiting for the purchase to start"
+      fail()
+      return
+
+    # extra block just to make sure we have one that separates us
+    # from the block containing the last (past) SlotFilled event
+    discard await ethProvider.send("evm_mine")
+
+    var validators = CodexConfigs.init(nodes=2)
+      .withValidationGroups(groups = 2)
+      .withValidationGroupIndex(idx = 0, groupIndex = 0)
+      .withValidationGroupIndex(idx = 1, groupIndex = 1)
+      # .debug() # uncomment to enable console log output
+      .withLogFile() # uncomment to output log file to:
+        # tests/integration/logs/ //_.log
+      .withLogTopics("validator") # each topic as a separate string argument
+
+    failAndTeardownOnError "failed to start validator nodes":
+      for config in validators.configs.mitems:
+        let node = await startValidatorNode(config)
+        running.add RunningNode(
+          role: Role.Validator,
+          node: node
+        )
+
+    discard await ethProvider.send("evm_mine")
+    currentTime = await ethProvider.currentTime()
+    let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int
+
+    debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds
+
+    check await marketplace.waitForRequestToFail(
+      requestId,
+      timeout = secondsTillRequestEnd + 60,
+      step = 5
+    )
diff --git a/tests/testContracts.nim b/tests/testContracts.nim
index 4283c10a..aff2c1d7 100644
--- a/tests/testContracts.nim
+++ b/tests/testContracts.nim
@@ -2,5 +2,6 @@ import ./contracts/testContracts
 import ./contracts/testMarket
 import ./contracts/testDeployment
 import ./contracts/testClock
+import ./contracts/testProvider

 {.warning[UnusedImport]:off.}
diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim
index b1f81ef4..f0a59ee4 100644
--- a/tests/testIntegration.nim
+++ b/tests/testIntegration.nim
@@ -6,6 +6,7 @@ import ./integration/testpurchasing
 import ./integration/testblockexpiration
 import ./integration/testmarketplace
 import ./integration/testproofs
+import ./integration/testvalidator
 import ./integration/testecbug

 {.warning[UnusedImport]:off.}