Merge branch 'master' into chore/update-nim-version
# Conflicts:
#   codex/contracts/clock.nim
#   codex/contracts/market.nim
#   codex/utils/asyncstatemachine.nim
#   tests/integration/nodeprocess.nim
#   tests/integration/testecbug.nim
#   tests/integration/testmarketplace.nim
#   tests/integration/testproofs.nim
commit b9222d0415

@@ -11,7 +11,6 @@ env:
  cache_nonce: 0 # Allows for easily busting actions/cache caches
  nim_version: pinned

concurrency:
  group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
  cancel-in-progress: true

@@ -23,23 +22,23 @@ jobs:
      matrix: ${{ steps.matrix.outputs.matrix }}
      cache_nonce: ${{ env.cache_nonce }}
    steps:
-     - name: Compute matrix
-       id: matrix
-       uses: fabiocaccamo/create-matrix-action@v4
-       with:
-         matrix: |
-           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {macos}, cpu {amd64}, builder {macos-13}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-           os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
-           os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
-           os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
-           os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
+     - name: Compute matrix
+       id: matrix
+       uses: fabiocaccamo/create-matrix-action@v4
+       with:
+         matrix: |
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {macos}, cpu {amd64}, builder {macos-13}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
+           os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
+           os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
+           os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}

  build:
    needs: matrix

@@ -17,6 +17,12 @@ on:
      - '!docker/codex.Dockerfile'
      - '!docker/docker-entrypoint.sh'
  workflow_dispatch:
+   inputs:
+     run_release_tests:
+       description: Run Release tests
+       required: false
+       type: boolean
+       default: false

jobs:

@@ -28,6 +34,5 @@ jobs:
    nat_ip_auto: true
    tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
    tag_suffix: dist-tests
-   continuous_tests_list: PeersTest HoldMyBeerTest
-   continuous_tests_duration: 12h
+   run_release_tests: ${{ inputs.run_release_tests }}
  secrets: inherit

@@ -54,6 +54,11 @@ on:
      description: Continuous Tests duration
      required: false
      type: string
+   run_release_tests:
+     description: Run Release tests
+     required: false
+     type: string
+     default: false

env:

@@ -67,8 +72,8 @@ env:
  TAG_SHA: ${{ inputs.tag_sha }}
  TAG_SUFFIX: ${{ inputs.tag_suffix }}
  # Tests
- CONTINUOUS_TESTS_SOURCE: codex-storage/cs-codex-dist-tests
- CONTINUOUS_TESTS_BRANCH: master
+ TESTS_SOURCE: codex-storage/cs-codex-dist-tests
+ TESTS_BRANCH: master
  CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
  CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
  CONTINUOUS_TESTS_NAMEPREFIX: c-tests-ci

@@ -93,7 +98,7 @@ jobs:
      - target:
          os: linux
          arch: arm64
-       builder: buildjet-4vcpu-ubuntu-2204-arm
+       builder: buildjet-8vcpu-ubuntu-2204-arm

    name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }}
    runs-on: ${{ matrix.builder }}

@@ -219,49 +224,73 @@ jobs:
          docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}

- # Compute Continuous Tests inputs
+ # Compute Tests inputs
  compute-tests-inputs:
-   name: Compute Continuous Tests list
-   if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
+   name: Compute Tests inputs
+   if: ${{ inputs.continuous_tests_list != '' || inputs.run_release_tests == 'true' }}
    runs-on: ubuntu-latest
    needs: publish
    outputs:
      source: ${{ steps.compute.outputs.source }}
-     branch: ${{ steps.compute.outputs.branch }}
+     branch: ${{ env.TESTS_BRANCH }}
+     workflow_source: ${{ env.TESTS_SOURCE }}
      codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }}
-     nameprefix: ${{ steps.compute.outputs.nameprefix }}
-     continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
-     continuous_tests_duration: ${{ steps.compute.outputs.continuous_tests_duration }}
-     continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
-     workflow_source: ${{ steps.compute.outputs.workflow_source }}
    steps:
-     - name: Compute Continuous Tests list
+     - name: Compute Tests inputs
        id: compute
        run: |
-         echo "source=${{ format('{0}/{1}', github.server_url, env.CONTINUOUS_TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
-         echo "branch=${{ env.CONTINUOUS_TESTS_BRANCH }}" >> "$GITHUB_OUTPUT"
+         echo "source=${{ format('{0}/{1}', github.server_url, env.TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
          echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"

+ # Compute Continuous Tests inputs
+ compute-continuous-tests-inputs:
+   name: Compute Continuous Tests inputs
+   if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
+   runs-on: ubuntu-latest
+   needs: compute-tests-inputs
+   outputs:
+     nameprefix: ${{ steps.compute.outputs.nameprefix }}
+     continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
+     continuous_tests_duration: ${{ env.CONTINUOUS_TESTS_DURATION }}
+     continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
+   steps:
+     - name: Compute Continuous Tests inputs
+       id: compute
+       run: |
+         echo "nameprefix=$(awk '{ print tolower($0) }' <<< ${{ env.CONTINUOUS_TESTS_NAMEPREFIX }})" >> "$GITHUB_OUTPUT"
+         echo "continuous_tests_list=$(jq -cR 'split(" ")' <<< '${{ env.CONTINUOUS_TESTS_LIST }}')" >> "$GITHUB_OUTPUT"
+         echo "continuous_tests_duration=${{ env.CONTINUOUS_TESTS_DURATION }}" >> "$GITHUB_OUTPUT"
+         echo "workflow_source=${{ env.CONTINUOUS_TESTS_SOURCE }}" >> "$GITHUB_OUTPUT"

  # Run Continuous Tests
- run-tests:
+ run-continuous-tests:
    name: Run Continuous Tests
-   needs: [publish, compute-tests-inputs]
+   needs: [compute-tests-inputs, compute-continuous-tests-inputs]
    strategy:
      max-parallel: 1
      matrix:
-       tests: ${{ fromJSON(needs.compute-tests-inputs.outputs.continuous_tests_list) }}
+       tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
    with:
      source: ${{ needs.compute-tests-inputs.outputs.source }}
      branch: ${{ needs.compute-tests-inputs.outputs.branch }}
      codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
-     nameprefix: ${{ needs.compute-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
+     nameprefix: ${{ needs.compute-continuous-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-continuous-tests-inputs.outputs.continuous_tests_duration }}
      tests_filter: ${{ matrix.tests }}
      tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
      workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
    secrets: inherit

+ # Run Release Tests
+ run-release-tests:
+   name: Run Release Tests
+   needs: [compute-tests-inputs]
+   if: ${{ inputs.run_release_tests == 'true' }}
+   uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master
+   with:
+     source: ${{ needs.compute-tests-inputs.outputs.source }}
+     branch: ${{ needs.compute-tests-inputs.outputs.branch }}
+     codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
+     workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
+   secrets: inherit

@@ -20,7 +20,10 @@ jobs:
        uses: fabiocaccamo/create-matrix-action@v4
        with:
          matrix: |
-           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+           os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}

  build:
    needs: matrix

Makefile

@@ -48,7 +48,7 @@ ifeq ($(OS),Windows_NT)
    ARCH = arm64
  endif
else
- UNAME_P := $(shell uname -p)
+ UNAME_P := $(shell uname -m)
  ifneq ($(filter $(UNAME_P), i686 i386 x86_64),)
    ARCH = x86_64
  endif

@@ -41,6 +41,9 @@ task testContracts, "Build & run Codex Contract tests":
task testIntegration, "Run integration tests":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
  test "testIntegration"
+ # use params to enable logging from the integration test executable
+ # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
+ #   "-d:chronicles_enabled_topics:integration:TRACE"

task build, "build codex binary":
  codexTask()

@@ -18,6 +18,8 @@ import ../protobuf/presence
import ../peers

import ../../utils
+import ../../utils/exceptions
+import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils

@@ -26,7 +28,7 @@ import ../../manifest
logScope:
  topics = "codex discoveryengine advertiser"

- declareGauge(codexInflightAdvertise, "inflight advertise requests")
+ declareGauge(codex_inflight_advertise, "inflight advertise requests")

const
  DefaultConcurrentAdvertRequests = 10

@@ -42,7 +44,7 @@ type
    advertiseLocalStoreLoop*: Future[void]  # Advertise loop task handle
    advertiseQueue*: AsyncQueue[Cid]        # Advertise queue
-   advertiseTasks*: seq[Future[void]]      # Advertise tasks
+   trackedFutures*: TrackedFutures         # Advertise tasks futures

    advertiseLocalStoreLoopSleep: Duration  # Advertise loop sleep
    inFlightAdvReqs*: Table[Cid, Future[void]]  # Inflight advertise requests

@@ -70,20 +72,26 @@ proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
    await b.addCidToQueue(cid)
    await b.addCidToQueue(manifest.treeCid)

- proc advertiseLocalStoreLoop(b: Advertiser) {.async.} =
+ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
  while b.advertiserRunning:
-   if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
-     trace "Advertiser begins iterating blocks..."
-     for c in cids:
-       if cid =? await c:
-         await b.advertiseBlock(cid)
-     trace "Advertiser iterating blocks finished."
+   try:
+     if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
+       trace "Advertiser begins iterating blocks..."
+       for c in cids:
+         if cid =? await c:
+           await b.advertiseBlock(cid)
+       trace "Advertiser iterating blocks finished."

-   await sleepAsync(b.advertiseLocalStoreLoopSleep)
+     await sleepAsync(b.advertiseLocalStoreLoopSleep)
+   except CancelledError:
+     break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
+   except CatchableError as e:
+     error "failed to advertise blocks in local store", error = e.msgDetail

  info "Exiting advertise task loop"

- proc processQueueLoop(b: Advertiser) {.async.} =
+ proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
  while b.advertiserRunning:
    try:
      let

@@ -97,12 +105,12 @@ proc processQueueLoop(b: Advertiser) {.async.} =
        request = b.discovery.provide(cid)

      b.inFlightAdvReqs[cid] = request
-     codexInflightAdvertise.set(b.inFlightAdvReqs.len.int64)
+     codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
      await request

    finally:
      b.inFlightAdvReqs.del(cid)
-     codexInflightAdvertise.set(b.inFlightAdvReqs.len.int64)
+     codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
    except CancelledError:
      trace "Advertise task cancelled"
      return

@@ -129,9 +137,13 @@ proc start*(b: Advertiser) {.async.} =
  b.advertiserRunning = true
  for i in 0..<b.concurrentAdvReqs:
-   b.advertiseTasks.add(processQueueLoop(b))
+   let fut = b.processQueueLoop()
+   b.trackedFutures.track(fut)
+   asyncSpawn fut

  b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
+ b.trackedFutures.track(b.advertiseLocalStoreLoop)
+ asyncSpawn b.advertiseLocalStoreLoop

proc stop*(b: Advertiser) {.async.} =
  ## Stop the advertiser

@@ -145,19 +157,9 @@ proc stop*(b: Advertiser) {.async.} =
  b.advertiserRunning = false
  # Stop incoming tasks from callback and localStore loop
  b.localStore.onBlockStored = CidCallback.none
- if not b.advertiseLocalStoreLoop.isNil and not b.advertiseLocalStoreLoop.finished:
-   trace "Awaiting advertise loop to stop"
-   await b.advertiseLocalStoreLoop.cancelAndWait()
-   trace "Advertise loop stopped"
-
- # Clear up remaining tasks
- for task in b.advertiseTasks:
-   if not task.finished:
-     trace "Awaiting advertise task to stop"
-     await task.cancelAndWait()
-     trace "Advertise task stopped"

- trace "Advertiser stopped"
+ trace "Stopping advertise loop and tasks"
+ await b.trackedFutures.cancelTracked()
+ trace "Advertiser loop and tasks stopped"

proc new*(
  T: type Advertiser,

@@ -173,5 +175,6 @@ proc new*(
    discovery: discovery,
    concurrentAdvReqs: concurrentAdvReqs,
    advertiseQueue: newAsyncQueue[Cid](concurrentAdvReqs),
+   trackedFutures: TrackedFutures.new(),
    inFlightAdvReqs: initTable[Cid, Future[void]](),
    advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep)
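
The advertiser hunks above replace the ad-hoc `seq[Future[void]]` bookkeeping with `TrackedFutures` plus `asyncSpawn`: loops become fire-and-forget futures that cannot leak exceptions (`{.async: (raises: []).}`), yet remain cancellable from `stop` via `cancelTracked`. A minimal sketch of that pattern under chronos follows; the toy `TrackedFutures` here is an illustration, not the actual `codex/utils/trackedfutures` implementation:

```nim
import std/tables
import pkg/chronos

type TrackedFutures = ref object
  futures: Table[uint, FutureBase]  # in-flight futures, keyed by future id
  cancelling: bool

proc new(T: type TrackedFutures): TrackedFutures =
  TrackedFutures(futures: initTable[uint, FutureBase]())

proc track(self: TrackedFutures, fut: FutureBase) =
  if self.cancelling:
    return
  self.futures[fut.id] = fut
  # forget the future once it completes on its own
  fut.addCallback proc(_: pointer) =
    if not self.cancelling:
      self.futures.del(fut.id)

proc cancelTracked(self: TrackedFutures) {.async.} =
  ## cancel and await every future still in flight (what `stop` relies on)
  self.cancelling = true
  for fut in self.futures.values:
    if not fut.finished:
      await fut.cancelAndWait()
  self.futures.clear()
  self.cancelling = false
```

The start-side usage mirrors the diff: create the loop future, `track` it, then `asyncSpawn` it so it runs detached but remains known to the tracker.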

@@ -23,6 +23,7 @@ import ../network
import ../peers

import ../../utils
+import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils

@@ -31,7 +32,7 @@ import ../../manifest
logScope:
  topics = "codex discoveryengine"

- declareGauge(codexInflightDiscovery, "inflight discovery requests")
+ declareGauge(codex_inflight_discovery, "inflight discovery requests")

const
  DefaultConcurrentDiscRequests = 10

@@ -50,12 +51,12 @@ type
    concurrentDiscReqs: int           # Concurrent discovery requests
    discoveryLoop*: Future[void]      # Discovery loop task handle
    discoveryQueue*: AsyncQueue[Cid]  # Discovery queue
-   discoveryTasks*: seq[Future[void]]  # Discovery tasks
+   trackedFutures*: TrackedFutures    # Tracked Discovery tasks futures
    minPeersPerBlock*: int            # Max number of peers with block
    discoveryLoopSleep: Duration      # Discovery loop sleep
    inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]  # Inflight discovery requests

- proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
+ proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
  while b.discEngineRunning:
    for cid in toSeq(b.pendingBlocks.wantListBlockCids):
      try:

@@ -66,13 +67,15 @@ proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
      except CatchableError as exc:
        warn "Exception in discovery loop", exc = exc.msg

-   logScope:
-     sleep = b.discoveryLoopSleep
-     wanted = b.pendingBlocks.len
+   try:
+     logScope:
+       sleep = b.discoveryLoopSleep
+       wanted = b.pendingBlocks.len
+     await sleepAsync(b.discoveryLoopSleep)
+   except CancelledError:
+     discard # do not propagate as discoveryQueueLoop was asyncSpawned

-   await sleepAsync(b.discoveryLoopSleep)

- proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
+ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
  ## Run discovery tasks
  ##

@@ -96,7 +99,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
            .wait(DefaultDiscoveryTimeout)

        b.inFlightDiscReqs[cid] = request
-       codexInflightDiscovery.set(b.inFlightDiscReqs.len.int64)
+       codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
        let
          peers = await request

@@ -110,12 +113,17 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
        finally:
          b.inFlightDiscReqs.del(cid)
-         codexInflightDiscovery.set(b.inFlightDiscReqs.len.int64)
+         codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
    except CancelledError:
      trace "Discovery task cancelled"
      return
    except CatchableError as exc:
      warn "Exception in discovery task runner", exc = exc.msg
+   except Exception as e:
+     # Raised by b.discovery.removeProvider somehow...
+     # This should not be catchable, and we should never get here. Therefore,
+     # raise a Defect.
+     raiseAssert "Exception when removing provider"

  info "Exiting discovery task runner"

@@ -139,9 +147,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
  b.discEngineRunning = true
  for i in 0..<b.concurrentDiscReqs:
-   b.discoveryTasks.add(discoveryTaskLoop(b))
+   let fut = b.discoveryTaskLoop()
+   b.trackedFutures.track(fut)
+   asyncSpawn fut

- b.discoveryLoop = discoveryQueueLoop(b)
+ b.discoveryLoop = b.discoveryQueueLoop()
+ b.trackedFutures.track(b.discoveryLoop)
+ asyncSpawn b.discoveryLoop

proc stop*(b: DiscoveryEngine) {.async.} =
  ## Stop the discovery engine

@@ -153,16 +165,9 @@ proc stop*(b: DiscoveryEngine) {.async.} =
    return

  b.discEngineRunning = false
- for task in b.discoveryTasks:
-   if not task.finished:
-     trace "Awaiting discovery task to stop"
-     await task.cancelAndWait()
-     trace "Discovery task stopped"

- if not b.discoveryLoop.isNil and not b.discoveryLoop.finished:
-   trace "Awaiting discovery loop to stop"
-   await b.discoveryLoop.cancelAndWait()
-   trace "Discovery loop stopped"
+ trace "Stopping discovery loop and tasks"
+ await b.trackedFutures.cancelTracked()
+ trace "Discovery loop and tasks stopped"

  trace "Discovery engine stopped"

@@ -187,6 +192,7 @@ proc new*(
    pendingBlocks: pendingBlocks,
    concurrentDiscReqs: concurrentDiscReqs,
    discoveryQueue: newAsyncQueue[Cid](concurrentDiscReqs),
+   trackedFutures: TrackedFutures.new(),
    inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
    discoveryLoopSleep: discoveryLoopSleep,
    minPeersPerBlock: minPeersPerBlock)

@@ -22,6 +22,8 @@ import pkg/questionable
import ../../stores/blockstore
import ../../blocktype
import ../../utils
+import ../../utils/exceptions
+import ../../utils/trackedfutures
import ../../merkletree
import ../../logutils
import ../../manifest

@@ -70,7 +72,7 @@ type
    peers*: PeerCtxStore                         # Peers we're currently actively exchanging with
    taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]  # Peers we're currently processing tasks for
    concurrentTasks: int                         # Number of concurrent peers we're serving at any given time
-   blockexcTasks: seq[Future[void]]             # Future to control blockexc task
+   trackedFutures: TrackedFutures               # Tracks futures of blockexc tasks
    blockexcRunning: bool                        # Indicates if the blockexc task is running
    pendingBlocks*: PendingBlocksManager         # Blocks we're awaiting to be resolved
    peersPerRequest: int                         # Max number of peers to request from

@@ -88,7 +90,7 @@ type
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
  b.taskQueue.pushOrUpdateNoWait(task).isOk()

- proc blockexcTaskRunner(b: BlockExcEngine): Future[void] {.gcsafe.}
+ proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).}

proc start*(b: BlockExcEngine) {.async.} =
  ## Start the blockexc task

@@ -104,7 +106,9 @@ proc start*(b: BlockExcEngine) {.async.} =
  b.blockexcRunning = true
  for i in 0..<b.concurrentTasks:
-   b.blockexcTasks.add(blockexcTaskRunner(b))
+   let fut = b.blockexcTaskRunner()
+   b.trackedFutures.track(fut)
+   asyncSpawn fut

proc stop*(b: BlockExcEngine) {.async.} =
  ## Stop the blockexc blockexc

@@ -119,36 +123,32 @@ proc stop*(b: BlockExcEngine) {.async.} =
    return

  b.blockexcRunning = false
- for task in b.blockexcTasks:
-   if not task.finished:
-     trace "Awaiting task to stop"
-     await task.cancelAndWait()
-     trace "Task stopped"
+ await b.trackedFutures.cancelTracked()

  trace "NetworkStore stopped"

proc sendWantHave(
  b: BlockExcEngine,
- address: BlockAddress, # pluralize this entire call chain, please
+ addresses: seq[BlockAddress],
  excluded: seq[BlockExcPeerCtx],
  peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
- trace "Sending wantHave request to peers", address
  for p in peers:
    if p notin excluded:
-     if address notin p.peerHave:
-       await b.network.request.sendWantList(
-         p.id,
-         @[address],
-         wantType = WantType.WantHave) # we only want to know if the peer has the block
+     let toAsk = addresses.filterIt(it notin p.peerHave)
+     trace "Sending wantHave request", toAsk, peer = p.id
+     await b.network.request.sendWantList(
+       p.id,
+       toAsk,
+       wantType = WantType.WantHave)

proc sendWantBlock(
  b: BlockExcEngine,
- address: BlockAddress, # pluralize this entire call chain, please
+ addresses: seq[BlockAddress],
  blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
- trace "Sending wantBlock request to", peer = blockPeer.id, address
+ trace "Sending wantBlock request to", addresses, peer = blockPeer.id
  await b.network.request.sendWantList(
    blockPeer.id,
-   @[address],
+   addresses,
    wantType = WantType.WantBlock) # we want this remote to send us a block

proc monitorBlockHandle(

@@ -197,9 +197,10 @@ proc requestBlock*(
  if peer =? maybePeer:
    asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id)
    b.pendingBlocks.setInFlight(address)
-   await b.sendWantBlock(address, peer)
+   # TODO: Send more block addresses if at all sensible.
+   await b.sendWantBlock(@[address], peer)
    codex_block_exchange_want_block_lists_sent.inc()
-   await b.sendWantHave(address, @[peer], toSeq(b.peers))
+   await b.sendWantHave(@[address], @[peer], toSeq(b.peers))
    codex_block_exchange_want_have_lists_sent.inc()

  # Don't let timeouts bubble up. We can't be too broad here or we break

@@ -246,8 +247,7 @@ proc blockPresenceHandler*(
  if wantCids.len > 0:
    trace "Peer has blocks in our wantList", peer, wantCount = wantCids.len
-   discard await allFinished(
-     wantCids.mapIt(b.sendWantBlock(it, peerCtx)))
+   await b.sendWantBlock(wantCids, peerCtx)

  # if none of the connected peers report our wants in their have list,
  # fire up discovery

@@ -565,16 +565,21 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
  task.peerWants.keepItIf(it.address notin successAddresses)

- proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
+ proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} =
  ## process tasks
  ##

  trace "Starting blockexc task runner"
  while b.blockexcRunning:
-   let
-     peerCtx = await b.taskQueue.pop()
-
-   await b.taskHandler(peerCtx)
+   try:
+     let
+       peerCtx = await b.taskQueue.pop()

+     await b.taskHandler(peerCtx)
+   except CancelledError:
+     break # do not propagate as blockexcTaskRunner was asyncSpawned
+   except CatchableError as e:
+     error "error running block exchange task", error = e.msgDetail

  info "Exiting blockexc task runner"

@@ -603,6 +608,7 @@ proc new*(
    network: network,
    wallet: wallet,
    concurrentTasks: concurrentTasks,
+   trackedFutures: TrackedFutures.new(),
    taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
    discovery: discovery,
    advertiser: advertiser,
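
The `sendWantHave`/`sendWantBlock` changes above pluralise the call chain: each peer now receives a single want-list, filtered against the addresses it already announced in `peerHave`, instead of one network request per block address. A self-contained sketch of that filtering step, with toy types standing in for `BlockAddress` and the peer context:

```nim
import std/sequtils

type
  BlockAddress = object
    cid: string
  PeerCtx = object
    id: string
    peerHave: seq[BlockAddress]  # addresses the peer already reported having

# One want-list per peer: keep only the addresses the peer has not
# announced yet, then send them in a single WantHave message.
proc wantHaveListFor(addresses: seq[BlockAddress], peer: PeerCtx): seq[BlockAddress] =
  addresses.filterIt(it notin peer.peerHave)

when isMainModule:
  let a = BlockAddress(cid: "a")
  let b = BlockAddress(cid: "b")
  let peer = PeerCtx(id: "p1", peerHave: @[a])
  assert wantHaveListFor(@[a, b], peer) == @[b]
```

The same idea lets `blockPresenceHandler` replace the fan-out of per-CID `sendWantBlock` futures with one batched call.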

@@ -100,7 +100,6 @@ type
      name: "log-level" }: string

    logFormat* {.
-     hidden
      desc: "Specifies what kind of logs should be written to stdout (auto, " &
            "colors, nocolors, json)"
      defaultValueDesc: "auto"

@@ -316,7 +315,7 @@ type
      defaultValue: ValidationGroups.none
      name: "validator-groups"
      .}: Option[ValidationGroups]

    validatorGroupIndex* {.
      desc: "Slot validation group index"
      longDesc: "The value provided must be in the range " &

@@ -2,8 +2,10 @@ import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
+import contracts/provider

export requests
export marketplace
export market
export interactions
+export provider

@@ -1,5 +1,6 @@
import std/times
+import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock

@@ -45,7 +46,11 @@ method start*(clock: OnChainClock) {.async.} =
  if clock.started:
    return

- proc onBlock(_: ?!Block) =
+ proc onBlock(blckResult: ?!Block) =
+   if eventError =? blckResult.errorOption:
+     error "There was an error in block subscription", msg=eventError.msg
+     return
+
    # ignore block parameter; hardhat may call this with pending blocks
    asyncSpawn clock.update()
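
The `onBlock` change above is the recurring shape of this commit's subscription handlers: check the `?!T` result for an error first, log it, and only then act. A minimal sketch of that shape, assuming the `questionable` result helpers and a stand-in `Block` type (the real one comes from ethers):

```nim
import pkg/questionable
import pkg/questionable/results

type Block = object  # stand-in for the ethers block type
  number: int

proc onBlock(blckResult: ?!Block) =
  # surface subscription errors instead of acting on a missing value
  if eventError =? blckResult.errorOption:
    echo "error in block subscription: ", eventError.msg
    return
  # the block value itself is still ignored: hardhat may deliver
  # pending blocks, so the handler just triggers a clock update
```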

@@ -1,6 +1,4 @@
import std/sequtils
- import std/strutils
- import std/sugar
import pkg/ethers
import pkg/upraises
import pkg/questionable

@@ -9,6 +7,7 @@ import ../logutils
import ../market
import ./marketplace
import ./proofs
+import ./provider

export market

@@ -20,6 +19,8 @@ type
    contract: Marketplace
    signer: Signer
    rewardRecipient: ?Address
+   configuration: ?MarketplaceConfig

  MarketSubscription = market.Subscription
  EventSubscription = ethers.Subscription
  OnChainMarketSubscription = ref object of MarketSubscription

@@ -48,6 +49,14 @@ template convertEthersError(body) =
  except EthersError as error:
    raiseMarketError(error.msgDetail)

+ proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
+   without resolvedConfig =? market.configuration:
+     let fetchedConfig = await market.contract.configuration()
+     market.configuration = some fetchedConfig
+     return fetchedConfig
+
+   return resolvedConfig

proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
  debug "Approving tokens", amount
  convertEthersError:

@@ -56,7 +65,7 @@ proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
    discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)

method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
- let config = await market.contract.configuration()
+ let config = await market.config()
  return some config.proofs.zkeyHash

method getSigner*(market: OnChainMarket): Future[Address] {.async.} =

@@ -65,18 +74,23 @@ method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
  convertEthersError:
-   let config = await market.contract.configuration()
+   let config = await market.config()
    let period = config.proofs.period
    return Periodicity(seconds: period)

method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} =
  convertEthersError:
-   let config = await market.contract.configuration()
+   let config = await market.config()
    return config.proofs.timeout

+ method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} =
+   convertEthersError:
+     let config = await market.contract.configuration()
+     return config.collateral.repairRewardPercentage

method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
  convertEthersError:
-   let config = await market.contract.configuration()
+   let config = await market.config()
    return config.proofs.downtime

method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =

@@ -101,7 +115,7 @@ method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} =
  await market.approveFunds(request.price())
  discard await market.contract.requestStorage(request).confirm(1)

- method getRequest(market: OnChainMarket,
+ method getRequest*(market: OnChainMarket,
                  id: RequestId): Future[?StorageRequest] {.async.} =
  convertEthersError:
    try:

@@ -277,14 +291,14 @@ method canReserveSlot*(
method subscribeRequests*(market: OnChainMarket,
                         callback: OnRequest):
                        Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!StorageRequested) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!StorageRequested) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in Request subscription", msg = eventErr.msg
      return

-   callback(value.requestId,
-            value.ask,
-            value.expiry)
+   callback(event.requestId,
+            event.ask,
+            event.expiry)

  convertEthersError:
    let subscription = await market.contract.subscribe(StorageRequested, onEvent)

@@ -293,12 +307,12 @@ method subscribeRequests*(market: OnChainMarket,
method subscribeSlotFilled*(market: OnChainMarket,
                            callback: OnSlotFilled):
                           Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!SlotFilled) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!SlotFilled) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in SlotFilled subscription", msg = eventErr.msg
      return

-   callback(value.requestId, value.slotIndex)
+   callback(event.requestId, event.slotIndex)

  convertEthersError:
    let subscription = await market.contract.subscribe(SlotFilled, onEvent)

@@ -319,12 +333,12 @@ method subscribeSlotFilled*(market: OnChainMarket,
method subscribeSlotFreed*(market: OnChainMarket,
                           callback: OnSlotFreed):
                          Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!SlotFreed) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!SlotFreed) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in SlotFreed subscription", msg = eventErr.msg
      return

-   callback(value.requestId, value.slotIndex)
+   callback(event.requestId, event.slotIndex)

  convertEthersError:
    let subscription = await market.contract.subscribe(SlotFreed, onEvent)

@@ -334,12 +348,12 @@ method subscribeSlotReservationsFull*(
  market: OnChainMarket,
  callback: OnSlotReservationsFull): Future[MarketSubscription] {.async.} =

- proc onEvent(event: ?!SlotReservationsFull) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in SlotReservationsFull subscription", msg = eventErr.msg
      return

-   callback(value.requestId, value.slotIndex)
+   callback(event.requestId, event.slotIndex)

  convertEthersError:
    let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)

@@ -348,12 +362,12 @@ method subscribeSlotReservationsFull*(
method subscribeFulfillment(market: OnChainMarket,
                            callback: OnFulfillment):
                           Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!RequestFulfilled) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
      return

-   callback(value.requestId)
+   callback(event.requestId)

  convertEthersError:
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)

@@ -363,13 +377,13 @@ method subscribeFulfillment(market: OnChainMarket,
                            requestId: RequestId,
                            callback: OnFulfillment):
                           Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!RequestFulfilled) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
      return

-   if value.requestId == requestId:
-     callback(value.requestId)
+   if event.requestId == requestId:
+     callback(event.requestId)

  convertEthersError:
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)

@@ -378,12 +392,12 @@ method subscribeFulfillment(market: OnChainMarket,
method subscribeRequestCancelled*(market: OnChainMarket,
                                  callback: OnRequestCancelled):
                                 Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!RequestCancelled) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return

-   callback(value.requestId)
+   callback(event.requestId)

  convertEthersError:
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)

@@ -393,13 +407,13 @@ method subscribeRequestCancelled*(market: OnChainMarket,
                                  requestId: RequestId,
                                  callback: OnRequestCancelled):
                                 Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!RequestCancelled) {.upraises:[].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return

-   if value.requestId == requestId:
-     callback(value.requestId)
+   if event.requestId == requestId:
+     callback(event.requestId)

  convertEthersError:
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)

@@ -408,12 +422,12 @@ method subscribeRequestCancelled*(market: OnChainMarket,
method subscribeRequestFailed*(market: OnChainMarket,
                               callback: OnRequestFailed):
                              Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!RequestFailed) {.upraises:[]} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} =
+   without event =? eventResult, eventErr:
+     error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return

-   callback(value.requestId)
+   callback(event.requestId)

  convertEthersError:
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)

@@ -423,13 +437,13 @@ method subscribeRequestFailed*(market: OnChainMarket,
                               requestId: RequestId,
                               callback: OnRequestFailed):
                              Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!RequestFailed) {.upraises:[]} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} =
+   without event =? eventResult, eventErr:
+     error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return

-   if value.requestId == requestId:
-     callback(value.requestId)
+   if event.requestId == requestId:
+     callback(event.requestId)

  convertEthersError:
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)

@@ -438,12 +452,12 @@ method subscribeRequestFailed*(market: OnChainMarket,
method subscribeProofSubmission*(market: OnChainMarket,
                                 callback: OnProofSubmitted):
                                Future[MarketSubscription] {.async.} =
- proc onEvent(event: ?!ProofSubmitted) {.upraises: [].} =
-   without value =? event:
-     error "The event object is not defined"
+ proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
+   without event =? eventResult, eventErr:
+     error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
      return

-   callback(value.id)
+   callback(event.id)

  convertEthersError:
    let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)

@@ -452,18 +466,49 @@ method subscribeProofSubmission*(market: OnChainMarket,
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
  await subscription.eventSubscription.unsubscribe()

- method queryPastEvents*[T: MarketplaceEvent](
-   market: OnChainMarket,
-   _: type T,
-   blocksAgo: int): Future[seq[T]] {.async.} =
-
-   convertEthersError:
-     let contract = market.contract
-     let provider = contract.provider
-
-     let head = await provider.getBlockNumber()
-     let fromBlock = BlockTag.init(head - blocksAgo.abs.u256)
-
-     return await contract.queryFilter(T,
-                                       fromBlock,
-                                       BlockTag.latest)
+ method queryPastSlotFilledEvents*(
+   market: OnChainMarket,
+   fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =
+
+   convertEthersError:
+     return await market.contract.queryFilter(SlotFilled,
+                                              fromBlock,
+                                              BlockTag.latest)
+
+ method queryPastSlotFilledEvents*(
+   market: OnChainMarket,
+   blocksAgo: int): Future[seq[SlotFilled]] {.async.} =
+
+   convertEthersError:
+     let fromBlock =
+       await market.contract.provider.pastBlockTag(blocksAgo)
+
+     return await market.queryPastSlotFilledEvents(fromBlock)
+
+ method queryPastSlotFilledEvents*(
+   market: OnChainMarket,
+   fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
+
+   convertEthersError:
+     let fromBlock =
+       await market.contract.provider.blockNumberForEpoch(fromTime)
+     return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
+
+ method queryPastStorageRequestedEvents*(
+   market: OnChainMarket,
+   fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
+
+   convertEthersError:
+     return await market.contract.queryFilter(StorageRequested,
+                                              fromBlock,
+                                              BlockTag.latest)
+
+ method queryPastStorageRequestedEvents*(
+   market: OnChainMarket,
+   blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
+
+   convertEthersError:
+     let fromBlock =
+       await market.contract.provider.pastBlockTag(blocksAgo)
+
+     return await market.queryPastStorageRequestedEvents(fromBlock)
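
The new `config` helper above memoises the marketplace configuration: the first call fetches it from the contract, every later call serves the cached value, saving one RPC round-trip in `periodicity`, `proofTimeout`, `proofDowntime` and friends. A sketch of the same memoisation under toy types, with the contract call faked:

```nim
import pkg/chronos
import pkg/questionable

type
  MarketplaceConfig = object
    period: uint64
  OnChainMarket = ref object
    configuration: ?MarketplaceConfig  # starts as none

proc fetchConfiguration(): Future[MarketplaceConfig] {.async.} =
  # stand-in for `market.contract.configuration()`
  return MarketplaceConfig(period: 60)

proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
  # first call: cache miss, fetch and remember; later calls: cache hit
  without resolvedConfig =? market.configuration:
    let fetchedConfig = await fetchConfiguration()
    market.configuration = some fetchedConfig
    return fetchedConfig
  return resolvedConfig
```

This is safe here because the on-chain configuration is effectively immutable for the lifetime of the market object; a value that can change would need invalidation.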

@@ -0,0 +1,126 @@
import pkg/ethers/provider
import pkg/chronos
import pkg/questionable

import ../logutils

from ../clock import SecondsSince1970

logScope:
  topics = "marketplace onchain provider"

proc raiseProviderError(message: string) {.raises: [ProviderError].} =
  raise newException(ProviderError, message)

proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
    Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
  without latestBlock =? await provider.getBlock(blockTag):
    raiseProviderError("Could not get latest block")

  without latestBlockNumber =? latestBlock.number:
    raiseProviderError("Could not get latest block number")

  return (latestBlockNumber, latestBlock.timestamp)

proc binarySearchFindClosestBlock(
    provider: Provider,
    epochTime: int,
    low: UInt256,
    high: UInt256): Future[UInt256] {.async: (raises: [ProviderError]).} =
  let (_, lowTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.init(low))
  let (_, highTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.init(high))
  if abs(lowTimestamp.truncate(int) - epochTime) <
      abs(highTimestamp.truncate(int) - epochTime):
    return low
  else:
    return high

proc binarySearchBlockNumberForEpoch(
    provider: Provider,
    epochTime: UInt256,
    latestBlockNumber: UInt256,
    earliestBlockNumber: UInt256): Future[UInt256]
    {.async: (raises: [ProviderError]).} =
  var low = earliestBlockNumber
  var high = latestBlockNumber

  while low <= high:
    if low == 0 and high == 0:
      return low
    let mid = (low + high) div 2
    let (midBlockNumber, midBlockTimestamp) =
      await provider.blockNumberAndTimestamp(BlockTag.init(mid))

    if midBlockTimestamp < epochTime:
      low = mid + 1
    elif midBlockTimestamp > epochTime:
      high = mid - 1
    else:
      return midBlockNumber
  # NOTICE that by how the binary search is implemented, when it finishes
  # low is always greater than high - this is why we use high, where
  # intuitively we would use low:
  await provider.binarySearchFindClosestBlock(
    epochTime.truncate(int), low=high, high=low)

proc blockNumberForEpoch*(
    provider: Provider,
    epochTime: SecondsSince1970): Future[UInt256]
    {.async: (raises: [ProviderError]).} =
  let epochTimeUInt256 = epochTime.u256
  let (latestBlockNumber, latestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.latest)
  let (earliestBlockNumber, earliestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.earliest)

  # Initially we used the average block time to predict
  # the number of blocks we need to look back in order to find
  # the block number corresponding to the given epoch time.
  # This estimation can be highly inaccurate if block time
  # was changing in the past or is fluctuating and therefore
  # we used that information initially only to find out
  # if the available history is long enough to perform effective search.
  # It turns out we do not have to do that. There is an easier way.
  #
  # First we check if the given epoch time equals the timestamp of either
  # the earliest or the latest block. If it does, we just return the
  # block number of that block.
  #
  # Otherwise, if the earliest available block is not the genesis block,
  # we should check the timestamp of that earliest block and if it is greater
  # than the epoch time, we should issue a warning and return
  # that earliest block number.
  # In all other cases, thus when the earliest block is not the genesis
  # block but its timestamp is not greater than the requested epoch time, or
  # if the earliest available block is the genesis block,
  # (which means we have the whole history available), we should proceed with
  # the binary search.
  #
  # Additional benefit of this method is that we do not have to rely
  # on the average block time, which not only makes the whole thing
  # more reliable, but also easier to test.

  # Are lucky today?
  if earliestBlockTimestamp == epochTimeUInt256:
    return earliestBlockNumber
  if latestBlockTimestamp == epochTimeUInt256:
    return latestBlockNumber

  if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
    let availableHistoryInDays =
      (latestBlockTimestamp - earliestBlockTimestamp) div
        1.days.secs.u256
    warn "Short block history detected.", earliestBlockTimestamp =
      earliestBlockTimestamp, days = availableHistoryInDays
    return earliestBlockNumber

  return await provider.binarySearchBlockNumberForEpoch(
    epochTimeUInt256, latestBlockNumber, earliestBlockNumber)

proc pastBlockTag*(provider: Provider,
                   blocksAgo: int):
                  Future[BlockTag] {.async: (raises: [ProviderError]).} =
  let head = await provider.getBlockNumber()
  return BlockTag.init(head - blocksAgo.abs.u256)
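
The binary search in this new file relies on a standard invariant: block timestamps increase monotonically with block number, and when the loop exits without an exact hit, `low` has crossed above `high`, so the two blocks bracketing the target timestamp are `high` (just below) and `low` (just above). That is why the final call passes the seemingly swapped arguments `low=high, high=low`. A self-contained toy version over an in-memory timestamp array, to make the invariant concrete:

```nim
# timestamp of block i, strictly increasing with i
let timestamps = @[100, 110, 125, 140, 160]

proc closestBlock(epochTime: int): int =
  var low = 0
  var high = timestamps.high
  while low <= high:
    let mid = (low + high) div 2
    if timestamps[mid] < epochTime: low = mid + 1
    elif timestamps[mid] > epochTime: high = mid - 1
    else: return mid
  # loop exited, so low > high: `high` sits just below the target and
  # `low` just above it; pick whichever timestamp is closer (clamped to
  # the valid range, which the earliest/latest checks guarantee above)
  let lo = max(high, 0)
  let hi = min(low, timestamps.high)
  if abs(timestamps[lo] - epochTime) <= abs(timestamps[hi] - epochTime):
    return lo
  return hi

assert closestBlock(127) == 2  # 125 is closer to 127 than 140
assert closestBlock(140) == 3  # exact hit
```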

@@ -49,6 +49,7 @@ type
    Failed
    Paid
    Cancelled
+   Repair

proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}

@@ -98,7 +98,6 @@ import pkg/questionable/results
import ./utils/json except formatIt # TODO: remove exception?
import pkg/stew/byteutils
import pkg/stint
- import pkg/upraises

export byteutils
export chronicles except toJson, formatIt, `%`

@@ -107,7 +106,6 @@ export sequtils
export json except formatIt
export strutils
export sugar
- export upraises
export results

func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =

@@ -184,12 +182,12 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
      let v = opts.map(opt => opt.formatJsonOption)
      setProperty(r, key, json.`%`(v))

-   proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) =
+   proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
      var it {.inject, used.}: T
      let v = val.map(it => body)
      setProperty(r, key, json.`%`(v))

-   proc setProperty*(r: var JsonRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
+   proc setProperty*(r: var JsonRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
      var it {.inject, used.}: T = val
      let v = body
      setProperty(r, key, json.`%`(v))

@@ -220,12 +218,12 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
      let v = opts.map(opt => opt.formatTextLineOption)
      setProperty(r, key, v.formatTextLineSeq)

-   proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) =
+   proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
      var it {.inject, used.}: T
      let v = val.map(it => body)
      setProperty(r, key, v.formatTextLineSeq)

-   proc setProperty*(r: var TextLineRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
+   proc setProperty*(r: var TextLineRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
      var it {.inject, used.}: T = val
      let v = body
      setProperty(r, key, v)
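
For context, the `setProperty` overloads being edited above are generated by the module's `formatIt` template: a type opts into logging by declaring once how it renders in each log format, and the template emits the matching `setProperty` procs. A hypothetical registration, assuming the `formatIt`/`LogFormat`/`shortLog` API shown in these hunks (the import path and the `RequestId` rendering are illustrative, not taken from this diff):

```nim
import codex/logutils  # assumed import path for this module

type MyId = distinct string

proc `$`(id: MyId): string = string(id)

# one-line text logs get an abbreviated form, JSON logs the full string
logutils.formatIt(LogFormat.textLines, MyId): shortLog($it)
logutils.formatIt(LogFormat.json, MyId): $it
```

The pragma change in the hunks (`upraises` to plain `raises`) is then just the generated procs declaring their exception surface natively, which is why the `pkg/upraises` import and export could be dropped.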

@@ -67,6 +67,9 @@ method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
method proofTimeout*(market: Market): Future[UInt256] {.base, async.} =
  raiseAssert("not implemented")

+ method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} =
+   raiseAssert("not implemented")

method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
  raiseAssert("not implemented")

@@ -243,8 +246,27 @@ method subscribeProofSubmission*(market: Market,
method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
  raiseAssert("not implemented")

- method queryPastEvents*[T: MarketplaceEvent](
-   market: Market,
-   _: type T,
-   blocksAgo: int): Future[seq[T]] {.base, async.} =
+ method queryPastSlotFilledEvents*(
+   market: Market,
+   fromBlock: BlockTag): Future[seq[SlotFilled]] {.base, async.} =
+   raiseAssert("not implemented")
+
+ method queryPastSlotFilledEvents*(
+   market: Market,
+   blocksAgo: int): Future[seq[SlotFilled]] {.base, async.} =
+   raiseAssert("not implemented")
+
+ method queryPastSlotFilledEvents*(
+   market: Market,
+   fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.base, async.} =
+   raiseAssert("not implemented")
+
+ method queryPastStorageRequestedEvents*(
+   market: Market,
+   fromBlock: BlockTag): Future[seq[StorageRequested]] {.base, async.} =
+   raiseAssert("not implemented")
+
+ method queryPastStorageRequestedEvents*(
+   market: Market,
+   blocksAgo: int): Future[seq[StorageRequested]] {.base, async.} =
  raiseAssert("not implemented")

@@ -758,12 +758,12 @@ proc stop*(self: CodexNodeRef) {.async.} =
  if hostContracts =? self.contracts.host:
    await hostContracts.stop()

- if not self.clock.isNil:
-   await self.clock.stop()
-
  if validatorContracts =? self.contracts.validator:
    await validatorContracts.stop()

+ if not self.clock.isNil:
+   await self.clock.stop()
+
  if not self.networkStore.isNil:
    await self.networkStore.close
@ -16,8 +16,8 @@ import ./sales/statemachine
|
|||
import ./sales/slotqueue
|
||||
import ./sales/states/preparing
|
||||
import ./sales/states/unknown
|
||||
import ./utils/then
|
||||
import ./utils/trackedfutures
|
||||
import ./utils/exceptions
|
||||
|
||||
## Sales holds a list of available storage that it may sell.
|
||||
##
|
||||
|
@@ -325,7 +325,7 @@ proc onSlotFreed(sales: Sales,

  trace "slot freed, adding to queue"

-  proc addSlotToQueue() {.async.} =
+  proc addSlotToQueue() {.async: (raises: []).} =
    let context = sales.context
    let market = context.market
    let queue = context.slotQueue

@@ -336,25 +336,24 @@ proc onSlotFreed(sales: Sales,
      trace "no existing request metadata, getting request info from contract"
      # if there's no existing slot for that request, retrieve the request
      # from the contract.
-      without request =? await market.getRequest(requestId):
-        error "unknown request in contract"
-        return
+      try:
+        without request =? await market.getRequest(requestId):
+          error "unknown request in contract"
+          return

-      found = SlotQueueItem.init(request, slotIndex.truncate(uint16))
+        found = SlotQueueItem.init(request, slotIndex.truncate(uint16))
+      except CancelledError:
+        discard # do not propagate as addSlotToQueue was asyncSpawned
+      except CatchableError as e:
+        error "failed to get request from contract and add slots to queue",
+          error = e.msgDetail

    if err =? queue.push(found).errorOption:
-      raise err
+      error "failed to push slot items to queue", error = err.msgDetail

-  addSlotToQueue()
-  .track(sales)
-  .catch(proc(err: ref CatchableError) =
-    if err of SlotQueueItemExistsError:
-      error "Failed to push item to queue because it already exists"
-    elif err of QueueNotRunningError:
-      warn "Failed to push item to queue because queue is not running"
-    else:
-      warn "Error adding request to SlotQueue", error = err.msg
-  )
+  let fut = addSlotToQueue()
+  sales.trackedFutures.track(fut)
+  asyncSpawn fut

proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
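The same replacement recurs throughout this commit: promise-style `.track(...).catch(...)` chains give way to a `raises: []` annotation plus `trackedFutures.track` and `asyncSpawn`. A minimal self-contained sketch of the shape (generic names, not from the codebase):

```nim
import pkg/chronos

# The proc handles every CatchableError itself -- enforced by the
# `raises: []` annotation -- so asyncSpawn cannot hit a FutureDefect.
proc work(): Future[void] {.async: (raises: []).} =
  try:
    await sleepAsync(10.millis)  # stand-in for the real work
  except CancelledError:
    discard # swallowed: the future is asyncSpawned, no caller awaits it
  except CatchableError:
    discard # would be logged via `error`/`warn` in the codebase

let fut = work()
# in the codebase: `self.trackedFutures.track(fut)`, so stop() can cancel it
asyncSpawn fut
waitFor sleepAsync(20.millis)  # let the spawned future run to completion
```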
@@ -482,7 +481,7 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
  except CatchableError as e:
    error "Unable to subscribe to slot filled events", msg = e.msg

-proc startSlotQueue(sales: Sales) {.async.} =
+proc startSlotQueue(sales: Sales) =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

@@ -491,7 +490,7 @@ proc startSlotQueue(sales: Sales) {.async.} =
    trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
    sales.processSlot(item, done)

-  asyncSpawn slotQueue.start()
+  slotQueue.start()

  proc onAvailabilityAdded(availability: Availability) {.async.} =
    await sales.onAvailabilityAdded(availability)
@@ -518,7 +517,7 @@ proc unsubscribe(sales: Sales) {.async.} =

proc start*(sales: Sales) {.async.} =
  await sales.load()
-  await sales.startSlotQueue()
+  sales.startSlotQueue()
  await sales.subscribe()
  sales.running = true
@@ -10,7 +10,6 @@ import ../rng
import ../utils
import ../contracts/requests
import ../utils/asyncheapqueue
-import ../utils/then
import ../utils/trackedfutures

logScope:

@@ -324,7 +323,7 @@ proc addWorker(self: SlotQueue): ?!void =

  let worker = SlotQueueWorker.init()
  try:
-    discard worker.doneProcessing.track(self)
+    self.trackedFutures.track(worker.doneProcessing)
    self.workers.addLastNoWait(worker)
  except AsyncQueueFullError:
    return failure("failed to add worker, worker queue full")

@@ -333,7 +332,7 @@ proc addWorker(self: SlotQueue): ?!void =

proc dispatch(self: SlotQueue,
              worker: SlotQueueWorker,
-              item: SlotQueueItem) {.async.} =
+              item: SlotQueueItem) {.async: (raises: []).} =
  logScope:
    requestId = item.requestId
    slotIndex = item.slotIndex

@@ -344,7 +343,7 @@ proc dispatch(self: SlotQueue,

  if onProcessSlot =? self.onProcessSlot:
    try:
-      discard worker.doneProcessing.track(self)
+      self.trackedFutures.track(worker.doneProcessing)
      await onProcessSlot(item, worker.doneProcessing)
      await worker.doneProcessing

@@ -380,22 +379,7 @@ proc clearSeenFlags*(self: SlotQueue) =

  trace "all 'seen' flags cleared"

-proc start*(self: SlotQueue) {.async.} =
-  if self.running:
-    return
-
-  trace "starting slot queue"
-
-  self.running = true
-
-  # must be called in `start` to avoid side effects in `new`
-  self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers)
-
-  # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
-  # task, a new worker will be pushed to the queue
-  for i in 0..<self.maxWorkers:
-    if err =? self.addWorker().errorOption:
-      error "start: error adding new worker", error = err.msg
+proc run(self: SlotQueue) {.async: (raises: []).} =

  while self.running:
    try:

@@ -405,8 +389,8 @@ proc start*(self: SlotQueue) {.async.} =
      # block until unpaused is true/fired, ie wait for queue to be unpaused
      await self.unpaused.wait()

-      let worker = await self.workers.popFirst().track(self) # if workers saturated, wait here for new workers
-      let item = await self.queue.pop().track(self) # if queue empty, wait here for new items
+      let worker = await self.workers.popFirst() # if workers saturated, wait here for new workers
+      let item = await self.queue.pop() # if queue empty, wait here for new items

      logScope:
        reqId = item.requestId

@@ -434,19 +418,38 @@ proc start*(self: SlotQueue) {.async.} =

      trace "processing item"

-      self.dispatch(worker, item)
-        .track(self)
-        .catch(proc (e: ref CatchableError) =
-          error "Unknown error dispatching worker", error = e.msg
-        )
+      let fut = self.dispatch(worker, item)
+      self.trackedFutures.track(fut)
+      asyncSpawn fut

      await sleepAsync(1.millis) # poll
    except CancelledError:
      trace "slot queue cancelled"
-      return
+      break
    except CatchableError as e: # raised from self.queue.pop() or self.workers.pop()
      warn "slot queue error encountered during processing", error = e.msg

+proc start*(self: SlotQueue) =
+  if self.running:
+    return
+
+  trace "starting slot queue"
+
+  self.running = true
+
+  # must be called in `start` to avoid side effects in `new`
+  self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers)
+
+  # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
+  # task, a new worker will be pushed to the queue
+  for i in 0..<self.maxWorkers:
+    if err =? self.addWorker().errorOption:
+      error "start: error adding new worker", error = err.msg
+
+  let fut = self.run()
+  self.trackedFutures.track(fut)
+  asyncSpawn fut

proc stop*(self: SlotQueue) {.async.} =
  if not self.running:
    return
@@ -1,3 +1,4 @@
+import pkg/stint
import ../../logutils
import ../../market
import ../statemachine

@@ -27,13 +28,23 @@ method onFailed*(state: SaleFilling, request: StorageRequest): ?State =
method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
  let data = SalesAgent(machine).data
  let market = SalesAgent(machine).context.market
-  without (collateral =? data.request.?ask.?collateral):
+  without (fullCollateral =? data.request.?ask.?collateral):
    raiseAssert "Request not set"

  logScope:
    requestId = data.requestId
    slotIndex = data.slotIndex

+  let slotState = await market.slotState(slotId(data.requestId, data.slotIndex))
+  var collateral: UInt256
+
+  if slotState == SlotState.Repair:
+    # When repairing the node gets "discount" on the collateral that it needs to
+    let repairRewardPercentage = (await market.repairRewardPercentage).u256
+    collateral = fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256)
+  else:
+    collateral = fullCollateral
+
  debug "Filling slot"
  try:
    await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
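A worked example of the repair discount formula above, with hypothetical numbers: a 10% repair reward on a full collateral of 100 leaves 90 to deposit.

```nim
import pkg/stint

let fullCollateral = 100.u256
let repairRewardPercentage = 10.u256  # hypothetical value, not from the config
let collateral = fullCollateral - (fullCollateral * repairRewardPercentage) div 100.u256
doAssert collateral == 90.u256
```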
@@ -49,7 +49,7 @@ method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =

  let slotId = slotId(data.requestId, data.slotIndex)
  let state = await market.slotState(slotId)
-  if state != SlotState.Free:
+  if state != SlotState.Free and state != SlotState.Repair:
    return some State(SaleIgnored(reprocessSlot: false, returnBytes: false))

  # TODO: Once implemented, check to ensure the host is allowed to fill the slot,
@@ -16,6 +16,7 @@ logScope:
  topics = "marketplace sales proving"

type
+  SlotFreedError* = object of CatchableError
  SlotNotFilledError* = object of CatchableError
  SaleProving* = ref object of ErrorHandlingState
    loop: Future[void]

@@ -82,6 +83,10 @@ proc proveLoop(
    of SlotState.Cancelled:
      debug "Slot reached cancelled state"
      # do nothing, let onCancelled callback take care of it
+    of SlotState.Repair:
+      warn "Slot was forcibly freed"
+      let message = "Slot was forcibly freed and host was removed from its hosting"
+      raise newException(SlotFreedError, message)
    of SlotState.Failed:
      debug "Slot reached failed state"
      # do nothing, let onFailed callback take care of it
@@ -5,6 +5,7 @@ import ./filled
import ./finished
import ./failed
import ./errored
+import ./proving
import ./cancelled
import ./payout

@@ -38,7 +39,7 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
  case slotState
  of SlotState.Free:
    let error = newException(UnexpectedSlotError,
-      "slot state on chain should not be 'free'")
+      "Slot state on chain should not be 'free'")
    return some State(SaleErrored(error: error))
  of SlotState.Filled:
    return some State(SaleFilled())

@@ -50,3 +51,7 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
    return some State(SaleFailed())
  of SlotState.Cancelled:
    return some State(SaleCancelled())
+  of SlotState.Repair:
+    let error = newException(SlotFreedError,
+      "Slot was forcibly freed and host was removed from its hosting")
+    return some State(SaleErrored(error: error))
@@ -58,7 +58,7 @@ proc deleteExpiredBlock(self: BlockMaintainer, cid: Cid): Future[void] {.async.}
  if isErr (await self.repoStore.delBlock(cid)):
    trace "Unable to delete block from repoStore"

-proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[void] {.async} =
+proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[void] {.async.} =
  if be.expiry < self.clock.now:
    await self.deleteExpiredBlock(be.cid)
  else:
@@ -1,12 +1,10 @@
import std/sugar
import pkg/questionable
import pkg/chronos
-import pkg/upraises
import ../logutils
-import ./then
import ./trackedfutures

-push: {.upraises:[].}
+{.push raises:[].}

type
  Machine* = ref object of RootObj

@@ -17,7 +15,7 @@ type
    trackedFutures: TrackedFutures
  State* = ref object of RootObj
  Query*[T] = proc(state: State): T
-  Event* = proc(state: State): ?State {.gcsafe, upraises:[].}
+  Event* = proc(state: State): ?State {.gcsafe, raises:[].}

logScope:
  topics = "statemachine"

@@ -58,31 +56,32 @@ proc onError(machine: Machine, error: ref CatchableError): Event =
  return proc (state: State): ?State =
    state.onError(error)

-proc run(machine: Machine, state: State) {.async.} =
-  if next =? await state.run(machine):
-    machine.schedule(Event.transition(state, next))
+proc run(machine: Machine, state: State) {.async: (raises:[]).} =
+  try:
+    if next =? await state.run(machine):
+      machine.schedule(Event.transition(state, next))
+  except CancelledError:
+    discard # do not propagate
+  except CatchableError as e:
+    machine.schedule(machine.onError(e))

-proc scheduler(machine: Machine) {.async, gcsafe.} =
-  var running: Future[void]
+proc scheduler(machine: Machine) {.async: (raises: []).} =
+  var running: Future[void].Raising([])
  while machine.started:
-    let event = await machine.scheduled.get().track(machine)
-    if next =? event(machine.state):
-      if not running.isNil and not running.finished:
-        trace "cancelling current state", state = $machine.state
-        await running.cancelAndWait()
-      let fromState = if machine.state.isNil: "<none>" else: $machine.state
-      machine.state = next
-      debug "enter state", state = fromState & " => " & $machine.state
-      running = machine.run(machine.state)
-
-      proc catchError(err: ref CatchableError) {.gcsafe.} =
-        trace "error caught in state.run, calling state.onError", state = $machine.state
-        machine.schedule(machine.onError(err))
-
-      running
-        .track(machine)
-        .cancelled(proc() = trace "state.run cancelled, swallowing", state = $machine.state)
-        .catch(catchError)
+    try:
+      let event = await machine.scheduled.get()
+      if next =? event(machine.state):
+        if not running.isNil and not running.finished:
+          trace "cancelling current state", state = $machine.state
+          await running.cancelAndWait()
+        let fromState = if machine.state.isNil: "<none>" else: $machine.state
+        machine.state = next
+        debug "enter state", state = fromState & " => " & $machine.state
+        running = machine.run(machine.state)
+        machine.trackedFutures.track(running)
+        asyncSpawn running
+    except CancelledError:
+      break # do not propagate bc it is asyncSpawned

proc start*(machine: Machine, initialState: State) =
  if machine.started:

@@ -92,13 +91,10 @@ proc start*(machine: Machine, initialState: State) =
  machine.scheduled = newAsyncQueue[Event]()

  machine.started = true
-  try:
-    discard machine.scheduler().track(machine)
-    machine.schedule(Event.transition(machine.state, initialState))
-  except CancelledError as e:
-    discard
-  except CatchableError as e:
-    error("Error in scheduler", error = e.msg)
+  let fut = machine.scheduler()
+  machine.trackedFutures.track(fut)
+  asyncSpawn fut
+  machine.schedule(Event.transition(machine.state, initialState))

proc stop*(machine: Machine) {.async.} =
  if not machine.started:
@@ -1,207 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises

# Similar to JavaScript's Promise API, `.then` and `.catch` can be used to
# handle results and errors of async `Futures` within a synchronous closure.
# They can be used as an alternative to `asyncSpawn` which does not return a
# value and will raise a `FutureDefect` if there are unhandled errors
# encountered. Both `.then` and `.catch` act as callbacks that do not block the
# synchronous closure's flow.

# `.then` is called when the `Future` is successfully completed and can be
# chained as many times as desired, calling each `.then` callback in order. When
# the `Future` returns `Result[T, ref CatchableError]` (or `?!T`), the value
# called in the `.then` callback will be unpacked from the `Result` as a
# convenience. In other words, for `Future[?!T]`, the `.then` callback will take
# a single parameter `T`. See `tests/utils/testthen.nim` for more examples. To
# allow for chaining, `.then` returns its future. If the future is already
# complete, the `.then` callback will be executed immediately.

# `.catch` is called when the `Future` fails. In the case when the `Future`
# returns a `Result[T, ref CatchableError]` (or `?!T`), `.catch` will be called
# if the `Result` contains an error. If the `Future` is already failed (or
# `Future[?!T]` contains an error), the `.catch` callback will be executed
# immediately.

# `.cancelled` is called when the `Future` is cancelled. If the `Future` is
# already cancelled, the `.cancelled` callback will be executed immediately.

# More info on JavaScript's Promise API can be found at:
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise

runnableExamples:
  proc asyncProc(): Future[int] {.async.} =
    await sleepAsync(1.millis)
    return 1

  asyncProc()
    .then(proc(i: int) = echo "returned ", i)
    .catch(proc(e: ref CatchableError) = doAssert false, "will not be triggered")

  # outputs "returned 1"

  proc asyncProcWithError(): Future[int] {.async.} =
    await sleepAsync(1.millis)
    raise newException(ValueError, "some error")

  asyncProcWithError()
    .then(proc(i: int) = doAssert false, "will not be triggered")
    .catch(proc(e: ref CatchableError) = echo "errored: ", e.msg)

  # outputs "errored: some error"

type
  OnSuccess*[T] = proc(val: T) {.gcsafe, upraises: [].}
  OnError* = proc(err: ref CatchableError) {.gcsafe, upraises: [].}
  OnCancelled* = proc() {.gcsafe, upraises: [].}

proc ignoreError(err: ref CatchableError) = discard
proc ignoreCancelled() = discard

template handleFinished(future: FutureBase,
                        onError: OnError,
                        onCancelled: OnCancelled) =

  if not future.finished:
    return

  if future.cancelled:
    onCancelled()
    return

  if future.failed:
    onError(future.error)
    return

proc then*(future: Future[void], onSuccess: OnSuccess[void]): Future[void] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)
    onSuccess()

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc then*[T](future: Future[T], onSuccess: OnSuccess[T]): Future[T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)

    if val =? future.read.catch:
      onSuccess(val)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc then*[T](future: Future[?!T], onSuccess: OnSuccess[T]): Future[?!T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)

    try:
      if val =? future.read:
        onSuccess(val)
    except CatchableError as e:
      ignoreError(e)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc then*(future: Future[?!void], onSuccess: OnSuccess[void]): Future[?!void] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)

    try:
      if future.read.isOk:
        onSuccess()
    except CatchableError as e:
      ignoreError(e)
      return

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc catch*[T](future: Future[T], onError: OnError) =

  if future.isNil: return

  proc cb(udata: pointer) =
    future.handleFinished(onError, ignoreCancelled)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation

proc catch*[T](future: Future[?!T], onError: OnError) =

  if future.isNil: return

  proc cb(udata: pointer) =
    future.handleFinished(onError, ignoreCancelled)

    try:
      if err =? future.read.errorOption:
        onError(err)
    except CatchableError as e:
      onError(e)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation

proc cancelled*[T](future: Future[T], onCancelled: OnCancelled): Future[T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, onCancelled)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)
      onCancelled()

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc cancelled*[T](future: Future[?!T], onCancelled: OnCancelled): Future[?!T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, onCancelled)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)
      onCancelled()

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future
@@ -30,13 +30,13 @@ proc new*(T: type Timer, timerName = "Unnamed Timer"): Timer =
  ## Create a new Timer instance with the given name
  Timer(name: timerName)

-proc timerLoop(timer: Timer) {.async.} =
+proc timerLoop(timer: Timer) {.async: (raises: []).} =
  try:
    while true:
      await timer.callback()
      await sleepAsync(timer.interval)
  except CancelledError:
-    raise
+    discard # do not propagate as timerLoop is asyncSpawned
  except CatchableError as exc:
    error "Timer caught unhandled exception: ", name=timer.name, msg=exc.msg

@@ -47,9 +47,10 @@ method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsaf
  timer.callback = callback
  timer.interval = interval
  timer.loopFuture = timerLoop(timer)
  asyncSpawn timer.loopFuture

method stop*(timer: Timer) {.async, base.} =
-  if timer.loopFuture != nil:
+  if timer.loopFuture != nil and not timer.loopFuture.finished:
    trace "Timer stopping: ", name=timer.name
    await timer.loopFuture.cancelAndWait()
+    timer.loopFuture = nil
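A short usage sketch of the timer, assuming the module path and the `TimerCallback` shape (an argument-less async proc); both are inferred from the diff context, not confirmed:

```nim
import pkg/chronos
import codex/utils/timer  # path assumed from the surrounding diff

proc onTick(): Future[void] {.async.} =  # assumed TimerCallback signature
  echo "tick"

let timer = Timer.new("example")
timer.start(onTick, 100.millis)  # timerLoop is asyncSpawned internally
waitFor sleepAsync(350.millis)
waitFor timer.stop()             # with this change, cancels only if still running
```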
@@ -1,9 +1,9 @@
-import std/sugar
import std/tables
import pkg/chronos

import ../logutils
-import ../utils/then

+{.push raises: [].}
+
type
  TrackedFutures* = ref object

@@ -19,34 +19,28 @@ proc removeFuture(self: TrackedFutures, future: FutureBase) =
  if not self.cancelling and not future.isNil:
    self.futures.del(future.id)

-proc track*[T](self: TrackedFutures, fut: Future[T]): Future[T] =
+proc track*[T](self: TrackedFutures, fut: Future[T]) =
  if self.cancelling:
-    return fut
+    return

  self.futures[fut.id] = FutureBase(fut)

-  fut
-    .then((val: T) => self.removeFuture(fut))
-    .cancelled(() => self.removeFuture(fut))
-    .catch((e: ref CatchableError) => self.removeFuture(fut))
+  proc cb(udata: pointer) =
+    self.removeFuture(fut)

-  return fut
+  fut.addCallback(cb)

-proc track*[T, U](future: Future[T], self: U): Future[T] =
-  ## Convenience method that allows chaining future, eg:
-  ## `await someFut().track(sales)`, where `sales` has declared a
-  ## `trackedFutures` property.
-  self.trackedFutures.track(future)

-proc cancelTracked*(self: TrackedFutures) {.async.} =
+proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} =
  self.cancelling = true

  trace "cancelling tracked futures"

+  var cancellations: seq[FutureBase]
  for future in self.futures.values:
    if not future.isNil and not future.finished:
      trace "cancelling tracked future", id = future.id
-      await future.cancelAndWait()
+      cancellations.add future.cancelAndWait()
+
+  await noCancel allFutures cancellations

  self.futures.clear()
  self.cancelling = false
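`track` now relies on chronos's completion callback, which fires exactly once whether the future completes, fails, or is cancelled, so the three separate `.then`/`.cancelled`/`.catch` hooks collapse into one cleanup path. A sketch of the mechanism in isolation (names are illustrative):

```nim
import pkg/chronos

proc demo() {.async.} =
  var removed = false
  let fut = sleepAsync(10.millis)
  # one callback covers success, failure, and cancellation; in
  # TrackedFutures this is where `self.removeFuture(fut)` happens
  fut.addCallback(proc(udata: pointer) =
    removed = true
  )
  await fut
  doAssert removed

waitFor demo()
```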
@@ -23,6 +23,9 @@ type
    proofTimeout: UInt256
    config: ValidationConfig

+const
+  MaxStorageRequestDuration = 30.days

logScope:
  topics = "codex validator"
@@ -56,15 +59,15 @@ func maxSlotsConstraintRespected(validation: Validation): bool =
  validation.slots.len < validation.config.maxSlots

func shouldValidateSlot(validation: Validation, slotId: SlotId): bool =
-  if (validationGroups =? validation.config.groups):
-    (groupIndexForSlotId(slotId, validationGroups) ==
-      validation.config.groupIndex) and
-      validation.maxSlotsConstraintRespected
-  else:
-    validation.maxSlotsConstraintRespected
+  without validationGroups =? validation.config.groups:
+    return true
+  groupIndexForSlotId(slotId, validationGroups) ==
+    validation.config.groupIndex

proc subscribeSlotFilled(validation: Validation) {.async.} =
  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
+    if not validation.maxSlotsConstraintRespected:
+      return
    let slotId = slotId(requestId, slotIndex)
    if validation.shouldValidateSlot(slotId):
      trace "Adding slot", slotId
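The rewrite swaps the nested `if/else` for questionable's `without` early return, leaving the maxSlots check to the callers. A minimal standalone sketch of that idiom (values are illustrative only):

```nim
import std/options
import pkg/questionable

proc describe(x: ?int): string =
  # `without` unpacks the Option; its block is the fallback branch
  without value =? x:
    return "no value"
  "value: " & $value

doAssert describe(3.some) == "value: 3"
doAssert describe(int.none) == "no value"
```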
@@ -78,7 +81,7 @@ proc removeSlotsThatHaveEnded(validation: Validation) {.async.} =
  for slotId in slots:
    let state = await validation.market.slotState(slotId)
    if state != SlotState.Filled:
-      trace "Removing slot", slotId
+      trace "Removing slot", slotId, slotState = state
      ended.incl(slotId)
  validation.slots.excl(ended)
@@ -106,7 +109,7 @@ proc markProofsAsMissing(validation: Validation) {.async.} =
    let previousPeriod = validation.getCurrentPeriod() - 1
    await validation.markProofAsMissing(slotId, previousPeriod)

-proc run(validation: Validation) {.async.} =
+proc run(validation: Validation) {.async: (raises: []).} =
  trace "Validation started"
  try:
    while true:

@@ -115,18 +118,42 @@ proc run(validation: Validation) {.async.} =
      await validation.markProofsAsMissing()
  except CancelledError:
    trace "Validation stopped"
-    discard
+    discard # do not propagate as run is asyncSpawned
  except CatchableError as e:
    error "Validation failed", msg = e.msg

+proc epochForDurationBackFromNow(validation: Validation,
+                                 duration: Duration): SecondsSince1970 =
+  return validation.clock.now - duration.secs
+
+proc restoreHistoricalState(validation: Validation) {.async.} =
+  trace "Restoring historical state..."
+  let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration)
+  let slotFilledEvents = await validation.market.queryPastSlotFilledEvents(
+    fromTime = startTimeEpoch)
+  for event in slotFilledEvents:
+    if not validation.maxSlotsConstraintRespected:
+      break
+    let slotId = slotId(event.requestId, event.slotIndex)
+    let slotState = await validation.market.slotState(slotId)
+    if slotState == SlotState.Filled and validation.shouldValidateSlot(slotId):
+      trace "Adding slot [historical]", slotId
+      validation.slots.incl(slotId)
+  trace "Historical state restored", numberOfSlots = validation.slots.len
+
proc start*(validation: Validation) {.async.} =
  trace "Starting validator", groups = validation.config.groups,
    groupIndex = validation.config.groupIndex
  validation.periodicity = await validation.market.periodicity()
  validation.proofTimeout = await validation.market.proofTimeout()
  await validation.subscribeSlotFilled()
+  await validation.restoreHistoricalState()
  validation.running = validation.run()
  asyncSpawn validation.running

proc stop*(validation: Validation) {.async.} =
-  await validation.running.cancelAndWait()
+  if not validation.running.isNil and not validation.running.finished:
+    await validation.running.cancelAndWait()
  while validation.subscriptions.len > 0:
    let subscription = validation.subscriptions.pop()
    await subscription.unsubscribe()
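A quick sanity check of the lookback arithmetic in `epochForDurationBackFromNow`, with a hypothetical current time:

```nim
const thirtyDaysSecs = 30 * 24 * 60 * 60       # 2_592_000 seconds
let now = 1_700_000_000'i64                    # assumed clock.now value
let startTimeEpoch = now - thirtyDaysSecs.int64
doAssert startTimeEpoch == 1_697_408_000'i64
```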
@@ -5,6 +5,7 @@ ARG RUST_VERSION=${RUST_VERSION:-1.79.0}
ARG BUILD_HOME=/src
ARG MAKE_PARALLEL=${MAKE_PARALLEL:-4}
ARG NIMFLAGS="${NIMFLAGS:-"-d:disableMarchNative"}"
+ARG USE_LIBBACKTRACE=${USE_LIBBACKTRACE:-1}
ARG APP_HOME=/codex
ARG NAT_IP_AUTO=${NAT_IP_AUTO:-false}

@@ -14,6 +15,7 @@ ARG RUST_VERSION
ARG BUILD_HOME
ARG MAKE_PARALLEL
ARG NIMFLAGS
+ARG USE_LIBBACKTRACE

RUN apt-get update && apt-get install -y git cmake curl make bash lcov build-essential
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${RUST_VERSION} -y
flake.lock
@@ -1,6 +1,40 @@
{
  "nodes": {
+    "circom-compat": {
+      "inputs": {
+        "nixpkgs": "nixpkgs"
+      },
+      "locked": {
+        "lastModified": 1732627240,
+        "narHash": "sha256-GvJTiBWBv799i5ZCCc4gF86bnQY/nZvx0vCPi1+OPD4=",
+        "owner": "codex-storage",
+        "repo": "circom-compat-ffi",
+        "rev": "297c46fdc7d8a8fd53c8076b0be77334e4a54447",
+        "type": "github"
+      },
+      "original": {
+        "owner": "codex-storage",
+        "repo": "circom-compat-ffi",
+        "type": "github"
+      }
+    },
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1731386116,
+        "narHash": "sha256-lKA770aUmjPHdTaJWnP3yQ9OI1TigenUqVC3wweqZuI=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "689fed12a013f56d4c4d3f612489634267d86529",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-24.05",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs_2": {
      "locked": {
        "lastModified": 1729449015,
        "narHash": "sha256-Gf04dXB0n4q0A9G5nTGH3zuMGr6jtJppqdeljxua1fo=",

@@ -18,7 +52,8 @@
    },
    "root": {
      "inputs": {
-        "nixpkgs": "nixpkgs"
+        "circom-compat": "circom-compat",
+        "nixpkgs": "nixpkgs_2"
      }
    }
  },
flake.nix
@@ -3,32 +3,40 @@

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05";
+    circom-compat.url = "github:codex-storage/circom-compat-ffi";
  };

-  outputs = { self, nixpkgs }:
+  outputs = { self, nixpkgs, circom-compat}:
    let
-      supportedSystems = [
+      stableSystems = [
        "x86_64-linux" "aarch64-linux"
        "x86_64-darwin" "aarch64-darwin"
      ];
-      forAllSystems = f: nixpkgs.lib.genAttrs supportedSystems (system: f system);
+      forAllSystems = f: nixpkgs.lib.genAttrs stableSystems (system: f system);
      pkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
    in rec {
+      packages = forAllSystems (system: let
+        circomCompatPkg = circom-compat.packages.${system}.default;
+        buildTarget = pkgsFor.${system}.callPackage ./nix/default.nix {
+          inherit stableSystems circomCompatPkg;
+          src = self;
+        };
+        build = targets: buildTarget.override { inherit targets; };
+      in rec {
+        codex = build ["all"];
+        default = codex;
+      });
+
      devShells = forAllSystems (system: let
        pkgs = pkgsFor.${system};
        inherit (pkgs) lib stdenv mkShell;
      in {
-        default = mkShell.override { stdenv = pkgs.gcc11Stdenv; } {
-          buildInputs = with pkgs; [
-            # General
-            git pkg-config openssl lsb-release
-            # Build
-            rustc cargo nimble gcc11 cmake nim-unwrapped-1
-            # Libraries
-            gmp llvmPackages.openmp
-            # Tests
-            nodejs_18
+        default = pkgs.mkShell {
+          inputsFrom = [
+            packages.${system}.codex
+            circom-compat.packages.${system}.default
          ];
+          # Not using buildInputs to override fakeGit and fakeCargo.
+          nativeBuildInputs = with pkgs; [ git cargo nodejs_18 ];
        };
      });
  };
@@ -0,0 +1,29 @@
# Usage

## Shell

A development shell can be started using:
```sh
nix develop
```

## Building

To build Codex you can use:
```sh
nix build '.?submodules=1#codex'
```
The `?submodules=1` part should eventually not be necessary.
For more details see:
https://github.com/NixOS/nix/issues/4423

It can also be done without cloning the repo:
```sh
nix build 'github:codex-storage/nim-codex?submodules=1'
```

## Running

```sh
nix run 'github:codex-storage/nim-codex?submodules=1'
```
@@ -0,0 +1,84 @@
{
  pkgs ? import <nixpkgs> { },
  src ? ../.,
  targets ? ["all"],
  # Options: 0,1,2
  verbosity ? 0,
  # Use system Nim compiler instead of building it with nimbus-build-system
  useSystemNim ? true,
  commit ? builtins.substring 0 7 (src.rev or "dirty"),
  # These are the only platforms tested in CI and considered stable.
  stableSystems ? [
    "x86_64-linux" "aarch64-linux"
    "x86_64-darwin" "aarch64-darwin"
  ],
  circomCompatPkg ? (
    builtins.getFlake "github:codex-storage/circom-compat-ffi"
  ).packages.${builtins.currentSystem}.default
}:

let
  inherit (pkgs) stdenv lib writeScriptBin callPackage;

  revision = lib.substring 0 8 (src.rev or "dirty");

  tools = callPackage ./tools.nix {};
in pkgs.gcc11Stdenv.mkDerivation rec {

  pname = "codex";

  version = "${tools.findKeyValue "version = \"([0-9]+\.[0-9]+\.[0-9]+)\"" ../codex.nimble}-${revision}";

  inherit src;

  # Dependencies that should exist in the runtime environment.
  buildInputs = with pkgs; [
    openssl
    gmp
  ];

  # Dependencies that should only exist in the build environment.
  nativeBuildInputs = let
    # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'.
    fakeGit = writeScriptBin "git" "echo ${version}";
    # Fix for the nim-circom-compat-ffi package that is built with cargo.
    fakeCargo = writeScriptBin "cargo" "echo ${version}";
  in
    with pkgs; [
      cmake
      pkg-config
      nimble
      which
      nim-unwrapped-1
      lsb-release
      circomCompatPkg
      fakeGit
      fakeCargo
    ];

  # Disable CPU optimizations that make binary not portable.
  NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
  # Avoid Nim cache permission errors.
  XDG_CACHE_HOME = "/tmp";

  makeFlags = targets ++ [
    "V=${toString verbosity}"
    "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}"
  ];

  configurePhase = ''
    patchShebangs . > /dev/null
  '';

  installPhase = ''
    mkdir -p $out/bin
    cp build/codex $out/bin/
  '';

  meta = with pkgs.lib; {
    description = "Codex storage system";
    homepage = "https://github.com/codex-storage/nim-codex";
    license = licenses.mit;
    platforms = stableSystems;
  };
}
@@ -0,0 +1,15 @@
{ pkgs ? import <nixpkgs> { } }:

let

  inherit (pkgs.lib) fileContents last splitString flatten remove;
  inherit (builtins) map match;
in {
  findKeyValue = regex: sourceFile:
    let
      linesFrom = file: splitString "\n" (fileContents file);
      matching = regex: lines: map (line: match regex line) lines;
      extractMatch = matches: last (flatten (remove null matches));
    in
      extractMatch (matching regex (linesFrom sourceFile));
}
@@ -22,6 +22,7 @@ asyncchecksuite "Advertiser":
    blockDiscovery: MockDiscovery
    localStore: BlockStore
    advertiser: Advertiser
+    advertised: seq[Cid]
  let
    manifest = Manifest.new(
      treeCid = Cid.example,

@@ -33,6 +34,11 @@ asyncchecksuite "Advertiser":
    blockDiscovery = MockDiscovery.new()
    localStore = CacheStore.new()

+    advertised = newSeq[Cid]()
+    blockDiscovery.publishBlockProvideHandler =
+      proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} =
+        advertised.add(cid)
+
    advertiser = Advertiser.new(
      localStore,
      blockDiscovery

@@ -43,47 +49,44 @@ asyncchecksuite "Advertiser":
  teardown:
    await advertiser.stop()

+  proc waitTillQueueEmpty() {.async.} =
+    check eventually advertiser.advertiseQueue.len == 0
+
  test "blockStored should queue manifest Cid for advertising":
    (await localStore.putBlock(manifestBlk)).tryGet()

+    await waitTillQueueEmpty()
+
    check:
-      manifestBlk.cid in advertiser.advertiseQueue
+      manifestBlk.cid in advertised

  test "blockStored should queue tree Cid for advertising":
    (await localStore.putBlock(manifestBlk)).tryGet()

+    await waitTillQueueEmpty()
+
    check:
-      manifest.treeCid in advertiser.advertiseQueue
+      manifest.treeCid in advertised

  test "blockStored should not queue non-manifest non-tree CIDs for discovery":
    let blk = bt.Block.example

    (await localStore.putBlock(blk)).tryGet()

+    await waitTillQueueEmpty()
+
    check:
-      blk.cid notin advertiser.advertiseQueue
+      blk.cid notin advertised

  test "Should not queue if there is already an inflight advertise request":
-    var
-      reqs = newFuture[void]()
-      manifestCount = 0
-      treeCount = 0
-
-    blockDiscovery.publishBlockProvideHandler =
-      proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} =
-        if cid == manifestBlk.cid:
-          inc manifestCount
-        if cid == manifest.treeCid:
-          inc treeCount
-
-        await reqs # queue the request
-
    (await localStore.putBlock(manifestBlk)).tryGet()
    (await localStore.putBlock(manifestBlk)).tryGet()

-    reqs.complete()
-    check eventually manifestCount == 1
-    check eventually treeCount == 1
+    await waitTillQueueEmpty()
+
+    check eventually advertised.len == 2
+    check manifestBlk.cid in advertised
+    check manifest.treeCid in advertised

  test "Should advertise existing manifests and their trees":
    let

@@ -96,8 +99,8 @@ asyncchecksuite "Advertiser":
    )
    await advertiser.start()

-    check eventually manifestBlk.cid in advertiser.advertiseQueue
-    check eventually manifest.treeCid in advertiser.advertiseQueue
+    check eventually manifestBlk.cid in advertised
+    check eventually manifest.treeCid in advertised

  test "Stop should clear onBlockStored callback":
    await advertiser.stop()
@@ -40,3 +40,6 @@ method waitUntil*(clock: MockClock, time: SecondsSince1970) {.async.} =
  let future = newFuture[void]()
  clock.waiting.add(Waiting(until: time, future: future))
  await future
+
+proc isWaiting*(clock: MockClock): bool =
+  clock.waiting.len > 0
@@ -8,11 +8,18 @@ import pkg/codex/market
import pkg/codex/contracts/requests
import pkg/codex/contracts/proofs
import pkg/codex/contracts/config
+
+from pkg/ethers import BlockTag
+import codex/clock

import ../examples

export market
+export tables
+
+logScope:
+  topics = "mockMarket"

type
  MockMarket* = ref object of Market
    periodicity: Periodicity

@@ -40,6 +47,7 @@ type
    config*: MarketplaceConfig
    canReserveSlot*: bool
    reserveSlotThrowError*: ?(ref MarketError)
+    clock: ?Clock
  Fulfillment* = object
    requestId*: RequestId
    proof*: Groth16Proof

@@ -49,6 +57,7 @@ type
    host*: Address
    slotIndex*: UInt256
    proof*: Groth16Proof
+    timestamp: ?SecondsSince1970
  Subscriptions = object
    onRequest: seq[RequestSubscription]
    onFulfillment: seq[FulfillmentSubscription]

@@ -94,7 +103,7 @@ proc hash*(address: Address): Hash =
proc hash*(requestId: RequestId): Hash =
  hash(requestId.toArray)

-proc new*(_: type MockMarket): MockMarket =
+proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket =
  ## Create a new mocked Market instance
  ##
  let config = MarketplaceConfig(

@@ -111,7 +120,8 @@ proc new*(_: type MockMarket): MockMarket =
      downtimeProduct: 67.uint8
    )
  )
-  MockMarket(signer: Address.example, config: config, canReserveSlot: true)
+  MockMarket(signer: Address.example, config: config,
+    canReserveSlot: true, clock: clock)

method getSigner*(market: MockMarket): Future[Address] {.async.} =
  return market.signer

@@ -125,6 +135,9 @@ method proofTimeout*(market: MockMarket): Future[UInt256] {.async.} =
method proofDowntime*(market: MockMarket): Future[uint8] {.async.} =
  return market.config.proofs.downtime

+method repairRewardPercentage*(market: MockMarket): Future[uint8] {.async.} =
+  return market.config.collateral.repairRewardPercentage
+
method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} =
  return market.proofPointer

@@ -142,7 +155,7 @@ method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} =
method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} =
  return market.activeSlots[market.signer]

-method getRequest(market: MockMarket,
+method getRequest*(market: MockMarket,
                  id: RequestId): Future[?StorageRequest] {.async.} =
  for request in market.requested:
    if request.id == id:

@@ -245,7 +258,8 @@ proc fillSlot*(market: MockMarket,
    requestId: requestId,
    slotIndex: slotIndex,
    proof: proof,
-    host: host
+    host: host,
+    timestamp: market.clock.?now
  )
  market.filled.add(slot)
  market.slotState[slotId(slot.requestId, slot.slotIndex)] = SlotState.Filled

@@ -469,21 +483,51 @@ method subscribeProofSubmission*(mock: MockMarket,
  mock.subscriptions.onProofSubmitted.add(subscription)
  return subscription

-method queryPastEvents*[T: MarketplaceEvent](
-  market: MockMarket,
-  _: type T,
-  blocksAgo: int): Future[seq[T]] {.async.} =
-
-  if T of StorageRequested:
-    return market.requested.map(request =>
-      StorageRequested(requestId: request.id,
-                       ask: request.ask,
-                       expiry: request.expiry)
-    )
-  elif T of SlotFilled:
-    return market.filled.map(slot =>
-      SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
-    )
+method queryPastStorageRequestedEvents*(
+  market: MockMarket,
+  fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
+  return market.requested.map(request =>
+    StorageRequested(requestId: request.id,
+                     ask: request.ask,
+                     expiry: request.expiry)
+  )
+
+method queryPastStorageRequestedEvents*(
+  market: MockMarket,
+  blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
+  return market.requested.map(request =>
+    StorageRequested(requestId: request.id,
+                     ask: request.ask,
+                     expiry: request.expiry)
+  )
+
+method queryPastSlotFilledEvents*(
+  market: MockMarket,
+  fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =
+  return market.filled.map(slot =>
+    SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
+  )
+
+method queryPastSlotFilledEvents*(
+  market: MockMarket,
+  blocksAgo: int): Future[seq[SlotFilled]] {.async.} =
+  return market.filled.map(slot =>
+    SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
+  )
+
+method queryPastSlotFilledEvents*(
+  market: MockMarket,
+  fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
+  let filtered = market.filled.filter(
+    proc (slot: MockSlot): bool =
+      if timestamp =? slot.timestamp:
+        return timestamp >= fromTime
+      else:
+        true
+  )
+  return filtered.map(slot =>
+    SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex)
+  )

method unsubscribe*(subscription: RequestSubscription) {.async.} =
  subscription.market.subscriptions.onRequest.keepItIf(it != subscription)
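Note the semantics of the mock's time filter above: a filled slot with no timestamp (the mock was created without a clock) is always kept. The predicate reduces to this plain-Nim equivalent (illustrative values):

```nim
import std/options

proc keep(timestamp: Option[int64], fromTime: int64): bool =
  if timestamp.isSome:
    timestamp.get >= fromTime
  else:
    true  # no timestamp recorded: never filtered out

doAssert keep(some(5'i64), 10) == false
doAssert keep(none(int64), 10) == true
```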
@@ -44,8 +44,6 @@ asyncchecksuite "sales state 'initialproving'":
    state = SaleInitialProving.new()

  proc allowProofToStart {.async.} =
-    # wait until we're in initialproving state
-    await sleepAsync(10.millis)
    # it won't start proving until the next period
    await clock.advanceToNextPeriod(market)

@@ -59,7 +57,7 @@ asyncchecksuite "sales state 'initialproving'":

  test "waits for the beginning of the period to get the challenge":
    let future = state.run(agent)
-    await sleepAsync(10.millis)
+    check eventually clock.isWaiting
    check not future.finished
    await allowProofToStart()
    discard await future

@@ -68,7 +66,7 @@ asyncchecksuite "sales state 'initialproving'":
    market.proofPointer = 250
    let future = state.run(agent)
    await allowProofToStart()
-    await sleepAsync(10.millis)
+    check eventually clock.isWaiting
    check not future.finished
    market.proofPointer = 100
    await allowProofToStart()
@@ -189,14 +189,16 @@ asyncchecksuite "Sales":
    await repoTmp.destroyDb()
    await metaTmp.destroyDb()

-  proc isInState(idx: int, state: string): Future[bool] {.async.} =
+  proc isInState(idx: int, state: string): bool =
    proc description(state: State): string =
      $state
-    check eventually sales.agents.len > idx
+    if idx >= sales.agents.len:
+      return false
    sales.agents[idx].query(description) == state.some

  proc allowRequestToStart {.async.} =
-    check eventually (await isInState(0, "SaleInitialProving"))
+    check eventually isInState(0, "SaleInitialProving")
    # it won't start proving until the next period
    await clock.advanceToNextPeriod(market)

@@ -564,6 +566,7 @@ asyncchecksuite "Sales":
    request.ask.slots = 2
    market.requested = @[request]
    market.requestState[request.id] = RequestState.New
+    market.requestEnds[request.id] = request.expiry.toSecondsSince1970

  proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} =
    let address = await market.getSigner()
@@ -27,8 +27,8 @@ suite "Slot queue start/stop":
    check not queue.running

  test "can call start multiple times, and when already running":
-    asyncSpawn queue.start()
-    asyncSpawn queue.start()
+    queue.start()
+    queue.start()
    check queue.running

  test "can call stop when already stopped":

@@ -36,12 +36,12 @@ suite "Slot queue start/stop":
    check not queue.running

  test "can call stop when running":
-    asyncSpawn queue.start()
+    queue.start()
    await queue.stop()
    check not queue.running

  test "can call stop multiple times":
-    asyncSpawn queue.start()
+    queue.start()
    await queue.stop()
    await queue.stop()
    check not queue.running

@@ -62,8 +62,6 @@ suite "Slot queue workers":
    queue = SlotQueue.new(maxSize = 5, maxWorkers = 3)
    queue.onProcessSlot = onProcessSlot

-  proc startQueue = asyncSpawn queue.start()
-
  teardown:
    await queue.stop()

@@ -79,7 +77,7 @@ suite "Slot queue workers":
    discard SlotQueue.new(maxSize = 1, maxWorkers = 2)

  test "does not surpass max workers":
-    startQueue()
+    queue.start()
    let item1 = SlotQueueItem.example
    let item2 = SlotQueueItem.example
    let item3 = SlotQueueItem.example

@@ -97,7 +95,7 @@ suite "Slot queue workers":

    queue.onProcessSlot = processSlot

-    startQueue()
+    queue.start()
    let item1 = SlotQueueItem.example
    let item2 = SlotQueueItem.example
    let item3 = SlotQueueItem.example

@@ -122,7 +120,7 @@ suite "Slot queue":
      onProcessSlotCalled = true
      onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
      done.complete()
-    asyncSpawn queue.start()
+    queue.start()

  setup:
    onProcessSlotCalled = false
@@ -3,7 +3,6 @@ import ./utils/testkeyutils
import ./utils/testasyncstatemachine
import ./utils/testasynciter
import ./utils/testtimer
-import ./utils/testthen
import ./utils/testtrackedfutures

{.warning[UnusedImport]: off.}
|
@ -1,9 +1,10 @@
|
|||
import pkg/chronos
|
||||
import std/strformat
|
||||
import std/random
|
||||
import std/times
|
||||
|
||||
import codex/validation
|
||||
import codex/periods
|
||||
import codex/clock
|
||||
|
||||
import ../asynctest
|
||||
import ./helpers/mockmarket
|
||||
|
@ -11,6 +12,9 @@ import ./helpers/mockclock
|
|||
import ./examples
|
||||
import ./helpers
|
||||
|
||||
logScope:
|
||||
topics = "testValidation"
|
||||
|
||||
asyncchecksuite "validation":
|
||||
let period = 10
|
||||
let timeout = 5
|
||||
|
@ -20,10 +24,10 @@ asyncchecksuite "validation":
|
|||
let proof = Groth16Proof.example
|
||||
let collateral = slot.request.ask.collateral
|
||||
|
||||
var validation: Validation
|
||||
var market: MockMarket
|
||||
var clock: MockClock
|
||||
var groupIndex: uint16
|
||||
var validation: Validation
|
||||
|
||||
proc initValidationConfig(maxSlots: MaxSlots,
|
||||
validationGroups: ?ValidationGroups,
|
||||
|
@ -32,19 +36,27 @@ asyncchecksuite "validation":
|
|||
maxSlots, groups=validationGroups, groupIndex), error:
|
||||
raiseAssert fmt"Creating ValidationConfig failed! Error msg: {error.msg}"
|
||||
validationConfig
|
||||
|
||||
proc newValidation(clock: Clock,
|
||||
market: Market,
|
||||
maxSlots: MaxSlots,
|
||||
validationGroups: ?ValidationGroups,
|
||||
groupIndex: uint16 = 0): Validation =
|
||||
let validationConfig = initValidationConfig(
|
||||
maxSlots, validationGroups, groupIndex)
|
||||
Validation.new(clock, market, validationConfig)
|
||||
|
||||
setup:
|
||||
groupIndex = groupIndexForSlotId(slot.id, !validationGroups)
|
||||
market = MockMarket.new()
|
||||
clock = MockClock.new()
|
||||
let validationConfig = initValidationConfig(
|
||||
maxSlots, validationGroups, groupIndex)
|
||||
validation = Validation.new(clock, market, validationConfig)
|
||||
market = MockMarket.new(clock = Clock(clock).some)
|
||||
market.config.proofs.period = period.u256
|
||||
market.config.proofs.timeout = timeout.u256
|
||||
await validation.start()
|
||||
validation = newValidation(
|
||||
clock, market, maxSlots, validationGroups, groupIndex)
|
||||
|
||||
teardown:
|
||||
# calling stop on validation that did not start is harmless
|
||||
await validation.stop()
|
||||
|
||||
proc advanceToNextPeriod =
|
||||
|
@ -79,6 +91,7 @@ asyncchecksuite "validation":
|
|||
test "initializing ValidationConfig fails when maxSlots is negative " &
|
||||
"(validationGroups set)":
|
||||
let maxSlots = -1
|
||||
let groupIndex = 0'u16
|
||||
let validationConfig = ValidationConfig.init(
|
||||
maxSlots = maxSlots, groups = validationGroups, groupIndex)
|
||||
check validationConfig.isFailure == true
|
||||
|
@ -86,45 +99,41 @@ asyncchecksuite "validation":
|
|||
fmt"be greater than or equal to 0! (got: {maxSlots})"
|
||||
|
||||
test "slot is not observed if it is not in the validation group":
|
||||
let validationConfig = initValidationConfig(maxSlots, validationGroups,
|
||||
(groupIndex + 1) mod uint16(!validationGroups))
|
||||
let validation = Validation.new(clock, market, validationConfig)
|
||||
validation = newValidation(clock, market, maxSlots, validationGroups,
|
||||
(groupIndex + 1) mod uint16(!validationGroups))
|
||||
await validation.start()
|
||||
await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
|
||||
await validation.stop()
|
||||
check validation.slots.len == 0
|
||||
|
||||
test "when a slot is filled on chain, it is added to the list":
|
||||
await validation.start()
|
||||
await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
|
||||
check validation.slots == @[slot.id]
|
||||
|
||||
test "slot should be observed if maxSlots is set to 0":
|
||||
let validationConfig = initValidationConfig(
|
||||
maxSlots = 0, ValidationGroups.none)
|
||||
let validation = Validation.new(clock, market, validationConfig)
|
||||
validation = newValidation(clock, market, maxSlots = 0, ValidationGroups.none)
|
||||
await validation.start()
|
||||
await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
|
||||
await validation.stop()
|
||||
check validation.slots == @[slot.id]
|
||||
|
||||
test "slot should be observed if validation group is not set (and " &
|
||||
"maxSlots is not 0)":
|
||||
let validationConfig = initValidationConfig(
|
||||
maxSlots, ValidationGroups.none)
|
||||
let validation = Validation.new(clock, market, validationConfig)
|
||||
    validation = newValidation(clock, market, maxSlots, ValidationGroups.none)
    await validation.start()
    await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    await validation.stop()
    check validation.slots == @[slot.id]

  for state in [SlotState.Finished, SlotState.Failed]:
    test fmt"when slot state changes to {state}, it is removed from the list":
      validation = newValidation(clock, market, maxSlots, validationGroups)
      await validation.start()
      await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
      market.slotState[slot.id] = state
      advanceToNextPeriod()
      check eventually validation.slots.len == 0

  test "when a proof is missed, it is marked as missing":
    await validation.start()
    await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    market.setCanProofBeMarkedAsMissing(slot.id, true)
    advanceToNextPeriod()

@@ -132,6 +141,7 @@ asyncchecksuite "validation":
    check market.markedAsMissingProofs.contains(slot.id)

  test "when a proof can not be marked as missing, it will not be marked":
    await validation.start()
    await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    market.setCanProofBeMarkedAsMissing(slot.id, false)
    advanceToNextPeriod()

@@ -139,13 +149,73 @@ asyncchecksuite "validation":
    check market.markedAsMissingProofs.len == 0

  test "it does not monitor more than the maximum number of slots":
    let validationGroups = ValidationGroups.none
    let validationConfig = initValidationConfig(
      maxSlots, validationGroups)
    let validation = Validation.new(clock, market, validationConfig)
    validation = newValidation(clock, market, maxSlots, ValidationGroups.none)
    await validation.start()
    for _ in 0..<maxSlots + 1:
      let slot = Slot.example
      await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    await validation.stop()
    check validation.slots.len == maxSlots

  suite "restoring historical state":
    test "it retrieves the historical state " &
        "for max 30 days in the past":
      let earlySlot = Slot.example
      await market.fillSlot(earlySlot.request.id, earlySlot.slotIndex, proof, collateral)
      let fromTime = clock.now()
      clock.set(fromTime + 1)
      await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)

      let duration: times.Duration = initDuration(days = 30)
      clock.set(fromTime + duration.inSeconds + 1)

      validation = newValidation(clock, market, maxSlots = 0,
        ValidationGroups.none)
      await validation.start()

      check validation.slots == @[slot.id]

    for state in [SlotState.Finished, SlotState.Failed]:
      test "when restoring historical state, " &
          fmt"it excludes slots in {state} state":
        let slot1 = Slot.example
        let slot2 = Slot.example
        await market.fillSlot(slot1.request.id, slot1.slotIndex,
          proof, collateral)
        await market.fillSlot(slot2.request.id, slot2.slotIndex,
          proof, collateral)

        market.slotState[slot1.id] = state

        validation = newValidation(clock, market, maxSlots = 0,
          ValidationGroups.none)
        await validation.start()

        check validation.slots == @[slot2.id]

    test "it does not monitor more than the maximum number of slots ":
      for _ in 0..<maxSlots + 1:
        let slot = Slot.example
        await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
      validation = newValidation(clock, market, maxSlots, ValidationGroups.none)
      await validation.start()
      check validation.slots.len == maxSlots

  test "slot is not observed if it is not in the validation group":
    await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    validation = newValidation(clock, market, maxSlots, validationGroups,
      (groupIndex + 1) mod uint16(!validationGroups))
    await validation.start()
    check validation.slots.len == 0

  test "slot should be observed if maxSlots is set to 0":
    await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    validation = newValidation(clock, market, maxSlots = 0, ValidationGroups.none)
    await validation.start()
    check validation.slots == @[slot.id]

  test "slot should be observed if validation " &
      "group is not set (and maxSlots is not 0)":
    await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral)
    validation = newValidation(clock, market, maxSlots, ValidationGroups.none)
    await validation.start()
    check validation.slots == @[slot.id]
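
# The tests above call a `newValidation` helper that is defined outside this
# hunk. A minimal sketch of what it presumably wraps, inferred from the
# replaced `initValidationConfig`/`Validation.new` lines; the exact signature
# and the default `groupIndex` are assumptions, not the committed code:
proc newValidation(clock: Clock,
                   market: Market,
                   maxSlots: int,
                   validationGroups: ?ValidationGroups,
                   groupIndex: uint16 = 0): Validation =
  let validationConfig = initValidationConfig(
    maxSlots, validationGroups, groupIndex)
  Validation.new(clock, market, validationConfig)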

@@ -1,414 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import codex/utils/then

import ../../asynctest
import ../helpers

proc newError(): ref CatchableError =
  (ref CatchableError)(msg: "some error")

asyncchecksuite "then - Future[void]":
  var error = newError()
  var future: Future[void]

  setup:
    future = newFuture[void]("test void")

  teardown:
    if not future.finished:
      raiseAssert "test should finish future"

  test "then callback is fired when future is already finished":
    var firedImmediately = false
    future.complete()
    discard future.then(proc() = firedImmediately = true)
    check eventually firedImmediately

  test "then callback is fired after future is finished":
    var fired = false
    discard future.then(proc() = fired = true)
    future.complete()
    check eventually fired

  test "catch callback is fired when future is already failed":
    var actual: ref CatchableError
    future.fail(error)
    future.catch(proc(err: ref CatchableError) = actual = err)
    check eventually actual == error

  test "catch callback is fired after future is failed":
    var actual: ref CatchableError
    future.catch(proc(err: ref CatchableError) = actual = err)
    future.fail(error)
    check eventually actual == error

  test "cancelled callback is fired when future is already cancelled":
    var fired = false
    await future.cancelAndWait()
    discard future.cancelled(proc() = fired = true)
    check eventually fired

  test "cancelled callback is fired after future is cancelled":
    var fired = false
    discard future.cancelled(proc() = fired = true)
    await future.cancelAndWait()
    check eventually fired

  test "does not fire other callbacks when successful":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.complete()

    check eventually onSuccessCalled
    check always (not onCancelledCalled and not onCatchCalled)

  test "does not fire other callbacks when fails":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.fail(error)

    check eventually onCatchCalled
    check always (not onCancelledCalled and not onSuccessCalled)

  test "does not fire other callbacks when cancelled":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    await future.cancelAndWait()

    check eventually onCancelledCalled
    check always (not onSuccessCalled and not onCatchCalled)

  test "can chain onSuccess when future completes":
    var onSuccessCalledTimes = 0
    discard future
      .then(proc() = inc onSuccessCalledTimes)
      .then(proc() = inc onSuccessCalledTimes)
      .then(proc() = inc onSuccessCalledTimes)
    future.complete()
    check eventually onSuccessCalledTimes == 3

asyncchecksuite "then - Future[T]":
  var error = newError()
  var future: Future[int]

  setup:
    future = newFuture[int]("test void")

  teardown:
    if not future.finished:
      raiseAssert "test should finish future"

  test "then callback is fired when future is already finished":
    var cbVal = 0
    future.complete(1)
    discard future.then(proc(val: int) = cbVal = val)
    check eventually cbVal == 1

  test "then callback is fired after future is finished":
    var cbVal = 0
    discard future.then(proc(val: int) = cbVal = val)
    future.complete(1)
    check eventually cbVal == 1

  test "catch callback is fired when future is already failed":
    var actual: ref CatchableError
    future.fail(error)
    future.catch(proc(err: ref CatchableError) = actual = err)
    check eventually actual == error

  test "catch callback is fired after future is failed":
    var actual: ref CatchableError
    future.catch(proc(err: ref CatchableError) = actual = err)
    future.fail(error)
    check eventually actual == error

  test "cancelled callback is fired when future is already cancelled":
    var fired = false
    await future.cancelAndWait()
    discard future.cancelled(proc() = fired = true)
    check eventually fired

  test "cancelled callback is fired after future is cancelled":
    var fired = false
    discard future.cancelled(proc() = fired = true)
    await future.cancelAndWait()
    check eventually fired

  test "does not fire other callbacks when successful":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc(val: int) = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.complete(1)

    check eventually onSuccessCalled
    check always (not onCancelledCalled and not onCatchCalled)

  test "does not fire other callbacks when fails":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc(val: int) = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.fail(error)

    check eventually onCatchCalled
    check always (not onCancelledCalled and not onSuccessCalled)

  test "does not fire other callbacks when cancelled":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc(val: int) = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    await future.cancelAndWait()

    check eventually onCancelledCalled
    check always (not onSuccessCalled and not onCatchCalled)

  test "can chain onSuccess when future completes":
    var onSuccessCalledTimes = 0
    discard future
      .then(proc(val: int) = inc onSuccessCalledTimes)
      .then(proc(val: int) = inc onSuccessCalledTimes)
      .then(proc(val: int) = inc onSuccessCalledTimes)
    future.complete(1)
    check eventually onSuccessCalledTimes == 3

asyncchecksuite "then - Future[?!void]":
  var error = newError()
  var future: Future[?!void]

  setup:
    future = newFuture[?!void]("test void")

  teardown:
    if not future.finished:
      raiseAssert "test should finish future"

  test "then callback is fired when future is already finished":
    var firedImmediately = false
    future.complete(success())
    discard future.then(proc() = firedImmediately = true)
    check eventually firedImmediately

  test "then callback is fired after future is finished":
    var fired = false
    discard future.then(proc() = fired = true)
    future.complete(success())
    check eventually fired

  test "catch callback is fired when future is already failed":
    var actual: ref CatchableError
    future.fail(error)
    future.catch(proc(err: ref CatchableError) = actual = err)
    check eventually actual == error

  test "catch callback is fired after future is failed":
    var actual: ref CatchableError
    future.catch(proc(err: ref CatchableError) = actual = err)
    future.fail(error)
    check eventually actual == error

  test "cancelled callback is fired when future is already cancelled":
    var fired = false
    await future.cancelAndWait()
    discard future.cancelled(proc() = fired = true)
    check eventually fired

  test "cancelled callback is fired after future is cancelled":
    var fired = false
    discard future.cancelled(proc() = fired = true)
    await future.cancelAndWait()
    check eventually fired

  test "does not fire other callbacks when successful":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.complete(success())

    check eventually onSuccessCalled
    check always (not onCancelledCalled and not onCatchCalled)

  test "does not fire other callbacks when fails":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.fail(error)

    check eventually onCatchCalled
    check always (not onCancelledCalled and not onSuccessCalled)

  test "does not fire other callbacks when cancelled":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    await future.cancelAndWait()

    check eventually onCancelledCalled
    check always (not onSuccessCalled and not onCatchCalled)

  test "can chain onSuccess when future completes":
    var onSuccessCalledTimes = 0
    discard future
      .then(proc() = inc onSuccessCalledTimes)
      .then(proc() = inc onSuccessCalledTimes)
      .then(proc() = inc onSuccessCalledTimes)
    future.complete(success())
    check eventually onSuccessCalledTimes == 3

asyncchecksuite "then - Future[?!T]":
  var error = newError()
  var future: Future[?!int]

  setup:
    future = newFuture[?!int]("test void")

  teardown:
    if not future.finished:
      raiseAssert "test should finish future"

  test "then callback is fired when future is already finished":
    var cbVal = 0
    future.complete(success(1))
    discard future.then(proc(val: int) = cbVal = val)
    check eventually cbVal == 1

  test "then callback is fired after future is finished":
    var cbVal = 0
    discard future.then(proc(val: int) = cbVal = val)
    future.complete(success(1))
    check eventually cbVal == 1

  test "catch callback is fired when future is already failed":
    var actual: ref CatchableError
    future.fail(error)
    future.catch(proc(err: ref CatchableError) = actual = err)
    check eventually actual == error

  test "catch callback is fired after future is failed":
    var actual: ref CatchableError
    future.catch(proc(err: ref CatchableError) = actual = err)
    future.fail(error)
    check eventually actual == error

  test "cancelled callback is fired when future is already cancelled":
    var fired = false
    await future.cancelAndWait()
    discard future.cancelled(proc() = fired = true)
    check eventually fired

  test "cancelled callback is fired after future is cancelled":
    var fired = false
    discard future.cancelled(proc() = fired = true)
    await future.cancelAndWait()
    check eventually fired

  test "does not fire other callbacks when successful":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc(val: int) = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.complete(success(1))

    check eventually onSuccessCalled
    check always (not onCancelledCalled and not onCatchCalled)

  test "does not fire other callbacks when fails":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc(val: int) = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.fail(error)

    check eventually onCatchCalled
    check always (not onCancelledCalled and not onSuccessCalled)

  test "does not fire other callbacks when cancelled":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc(val: int) = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    await future.cancelAndWait()

    check eventually onCancelledCalled
    check always (not onSuccessCalled and not onCatchCalled)

  test "can chain onSuccess when future completes":
    var onSuccessCalledTimes = 0
    discard future
      .then(proc(val: int) = inc onSuccessCalledTimes)
      .then(proc(val: int) = inc onSuccessCalledTimes)
      .then(proc(val: int) = inc onSuccessCalledTimes)
    future.complete(success(1))
    check eventually onSuccessCalledTimes == 3

@@ -18,36 +18,36 @@ asyncchecksuite "tracked futures":

  test "tracks unfinished futures":
    let fut = newFuture[void]("test")
    discard fut.track(module)
    module.trackedFutures.track(fut)
    check module.trackedFutures.len == 1

  test "does not track completed futures":
    let fut = newFuture[void]("test")
    fut.complete()
    discard fut.track(module)
    module.trackedFutures.track(fut)
    check eventually module.trackedFutures.len == 0

  test "does not track failed futures":
    let fut = newFuture[void]("test")
    fut.fail((ref CatchableError)(msg: "some error"))
    discard fut.track(module)
    module.trackedFutures.track(fut)
    check eventually module.trackedFutures.len == 0

  test "does not track cancelled futures":
    let fut = newFuture[void]("test")
    await fut.cancelAndWait()
    discard fut.track(module)
    module.trackedFutures.track(fut)
    check eventually module.trackedFutures.len == 0

  test "removes tracked future when finished":
    let fut = newFuture[void]("test")
    discard fut.track(module)
    module.trackedFutures.track(fut)
    fut.complete()
    check eventually module.trackedFutures.len == 0

  test "removes tracked future when cancelled":
    let fut = newFuture[void]("test")
    discard fut.track(module)
    module.trackedFutures.track(fut)
    await fut.cancelAndWait()
    check eventually module.trackedFutures.len == 0

@@ -55,9 +55,9 @@ asyncchecksuite "tracked futures":
    let fut1 = newFuture[void]("test1")
    let fut2 = newFuture[void]("test2")
    let fut3 = newFuture[void]("test3")
    discard fut1.track(module)
    discard fut2.track(module)
    discard fut3.track(module)
    module.trackedFutures.track(fut1)
    module.trackedFutures.track(fut2)
    module.trackedFutures.track(fut3)
    await module.trackedFutures.cancelTracked()
    check eventually fut1.cancelled
    check eventually fut2.cancelled
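
# TrackedFutures itself is not shown in this diff; the tests above pin down
# its contract. A minimal sketch under stated assumptions (chronos exposing
# `id`, `finished` and `addCallback` on FutureBase), for illustration only:
import std/sequtils
import std/tables
import pkg/chronos

type TrackedFutures = ref object
  futures: Table[uint, FutureBase]

proc len(self: TrackedFutures): int =
  self.futures.len

proc track(self: TrackedFutures, fut: FutureBase) =
  if fut.finished:
    return # completed, failed or cancelled futures are never stored
  self.futures[fut.id] = fut
  proc remove(udata: pointer) {.gcsafe, raises: [].} =
    self.futures.del(fut.id) # fires on completion, failure and cancellation
  fut.addCallback(remove)

proc cancelTracked(self: TrackedFutures) {.async.} =
  # copy first: the completion callbacks mutate the table while we cancel
  for fut in toSeq(self.futures.values):
    await fut.cancelAndWait()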

@@ -0,0 +1,85 @@
import std/strutils
import std/tables

import pkg/ethers/provider
from codex/clock import SecondsSince1970

export provider.Block

type MockProvider* = ref object of Provider
  blocks: OrderedTableRef[int, Block]
  earliest: ?int
  latest: ?int

method getBlock*(
  provider: MockProvider,
  tag: BlockTag
): Future[?Block] {.async: (raises: [ProviderError]).} =
  try:
    if tag == BlockTag.latest:
      if latestBlock =? provider.latest:
        if provider.blocks.hasKey(latestBlock):
          return provider.blocks[latestBlock].some
    elif tag == BlockTag.earliest:
      if earliestBlock =? provider.earliest:
        if provider.blocks.hasKey(earliestBlock):
          return provider.blocks[earliestBlock].some
    elif tag == BlockTag.pending:
      raiseAssert "MockProvider does not yet support BlockTag.pending"
    else:
      let blockNumber = parseHexInt($tag)
      if provider.blocks.hasKey(blockNumber):
        return provider.blocks[blockNumber].some
    return Block.none
  except:
    return Block.none

proc updateEarliestAndLatest(provider: MockProvider, blockNumber: int) =
  if provider.earliest.isNone:
    provider.earliest = blockNumber.some
  provider.latest = blockNumber.some

proc addBlocks*(provider: MockProvider, blocks: OrderedTableRef[int, Block]) =
  for number, blk in blocks.pairs:
    if provider.blocks.hasKey(number):
      continue
    provider.updateEarliestAndLatest(number)
    provider.blocks[number] = blk

proc addBlock*(provider: MockProvider, number: int, blk: Block) =
  if not provider.blocks.hasKey(number):
    provider.updateEarliestAndLatest(number)
    provider.blocks[number] = blk

proc newMockProvider*(): MockProvider =
  MockProvider(
    blocks: newOrderedTable[int, Block](),
    earliest: int.none,
    latest: int.none
  )

proc newMockProvider*(blocks: OrderedTableRef[int, Block]): MockProvider =
  let provider = newMockProvider()
  provider.addBlocks(blocks)
  provider

proc newMockProvider*(
  numberOfBlocks: int,
  earliestBlockNumber: int,
  earliestBlockTimestamp: SecondsSince1970,
  timeIntervalBetweenBlocks: SecondsSince1970
): MockProvider =
  var blocks = newOrderedTable[int, provider.Block]()
  var blockNumber = earliestBlockNumber
  var blockTime = earliestBlockTimestamp
  for i in 0..<numberOfBlocks:
    blocks[blockNumber] = provider.Block(number: blockNumber.u256.some,
      timestamp: blockTime.u256, hash: BlockHash.none)
    inc blockNumber
    inc blockTime, timeIntervalBetweenBlocks.int
  MockProvider(
    blocks: blocks,
    earliest: earliestBlockNumber.some,
    latest: (earliestBlockNumber + numberOfBlocks - 1).some
  )
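
# Hypothetical usage of the helpers above (`demo` exists only for this
# illustration): three blocks, 10 s apart, resolved through the mocked
# Provider interface.
import pkg/questionable

proc demo() {.async.} =
  let mock = newMockProvider(
    numberOfBlocks = 3,
    earliestBlockNumber = 1,
    earliestBlockTimestamp = 100,
    timeIntervalBetweenBlocks = 10
  )
  without latest =? await mock.getBlock(BlockTag.latest):
    raiseAssert "expected the mock to know its latest block"
  # blocks were generated at t = 100, 110, 120
  assert latest.timestamp == 120.u256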

@@ -10,6 +10,12 @@ import ./deployment

privateAccess(OnChainMarket) # enable access to private fields

# to see supportive information in the test output
# use the `-d:"chronicles_enabled_topics:testMarket:DEBUG"` option
# when compiling the test file
logScope:
  topics = "testMarket"

ethersuite "On-Chain Market":
  let proof = Groth16Proof.example

@@ -66,6 +72,11 @@ ethersuite "On-Chain Market":
  ):
    await advanceToNextPeriod()

  test "caches marketplace configuration":
    check isNone market.configuration
    discard await market.periodicity()
    check isSome market.configuration

  test "fails to instantiate when contract does not have a signer":
    let storageWithoutSigner = marketplace.connect(ethProvider)
    expect AssertionDefect:

@@ -298,7 +309,7 @@ ethersuite "On-Chain Market":
    let slotId = request.slotId(slotIndex.u256)
    while true:
      let slotState = await marketplace.slotState(slotId)
      if slotState == SlotState.Free:
      if slotState == SlotState.Repair or slotState == SlotState.Failed:
        break
    await waitUntilProofRequired(slotId)
    let missingPeriod = periodicity.periodOf(await ethProvider.currentTime())

@@ -407,7 +418,8 @@ ethersuite "On-Chain Market":
    # ago".

    proc getsPastRequest(): Future[bool] {.async.} =
      let reqs = await market.queryPastEvents(StorageRequested, 5)
      let reqs =
        await market.queryPastStorageRequestedEvents(blocksAgo = 5)
      reqs.mapIt(it.requestId) == @[request.id, request1.id, request2.id]

    check eventually await getsPastRequest()

@@ -426,19 +438,68 @@ ethersuite "On-Chain Market":
    # two PoA blocks per `fillSlot` call (6 blocks for 3 calls). We don't need
    # to check the `approve` for the first `fillSlot` call, so we only need to
    # check 5 "blocks ago".
    let events = await market.queryPastEvents(SlotFilled, 5)
    let events =
      await market.queryPastSlotFilledEvents(blocksAgo = 5)
    check events == @[
      SlotFilled(requestId: request.id, slotIndex: 0.u256),
      SlotFilled(requestId: request.id, slotIndex: 1.u256),
      SlotFilled(requestId: request.id, slotIndex: 2.u256),
    ]

  test "can query past SlotFilled events since given timestamp":
    await market.requestStorage(request)
    await market.reserveSlot(request.id, 0.u256)
    await market.fillSlot(request.id, 0.u256, proof, request.ask.collateral)

    # The SlotFilled event will be included in the same block as
    # the fillSlot transaction. If we want to ignore the SlotFilled event
    # for this first slot, we need to jump to the next block and use the
    # timestamp of that block as our "fromTime" parameter to the
    # queryPastSlotFilledEvents function.
    await ethProvider.advanceTime(10.u256)

    let (_, fromTime) =
      await ethProvider.blockNumberAndTimestamp(BlockTag.latest)

    await market.reserveSlot(request.id, 1.u256)
    await market.reserveSlot(request.id, 2.u256)
    await market.fillSlot(request.id, 1.u256, proof, request.ask.collateral)
    await market.fillSlot(request.id, 2.u256, proof, request.ask.collateral)

    let events = await market.queryPastSlotFilledEvents(
      fromTime = fromTime.truncate(SecondsSince1970))

    check events == @[
      SlotFilled(requestId: request.id, slotIndex: 1.u256),
      SlotFilled(requestId: request.id, slotIndex: 2.u256)
    ]

  test "queryPastSlotFilledEvents returns empty sequence of events when " &
      "no SlotFilled events have occurred since given timestamp":
    await market.requestStorage(request)
    await market.reserveSlot(request.id, 0.u256)
    await market.reserveSlot(request.id, 1.u256)
    await market.reserveSlot(request.id, 2.u256)
    await market.fillSlot(request.id, 0.u256, proof, request.ask.collateral)
    await market.fillSlot(request.id, 1.u256, proof, request.ask.collateral)
    await market.fillSlot(request.id, 2.u256, proof, request.ask.collateral)

    await ethProvider.advanceTime(10.u256)

    let (_, fromTime) =
      await ethProvider.blockNumberAndTimestamp(BlockTag.latest)

    let events = await market.queryPastSlotFilledEvents(
      fromTime = fromTime.truncate(SecondsSince1970))

    check events.len == 0

  test "past event query can specify negative `blocksAgo` parameter":
    await market.requestStorage(request)

    check eventually (
      (await market.queryPastEvents(StorageRequested, blocksAgo = -2)) ==
      (await market.queryPastEvents(StorageRequested, blocksAgo = 2))
      (await market.queryPastStorageRequestedEvents(blocksAgo = -2)) ==
      (await market.queryPastStorageRequestedEvents(blocksAgo = 2))
    )

  test "pays rewards and collateral to host":
@@ -0,0 +1,163 @@
import pkg/chronos
import codex/contracts
import ../asynctest
import ../ethertest
import ./time
import ./helpers/mockprovider

# to see supportive information in the test output
# use the `-d:"chronicles_enabled_topics:testProvider:DEBUG"` option
# when compiling the test file
logScope:
  topics = "testProvider"

suite "Provider (Mock)":
  test "blockNumberForEpoch returns the earliest block when its timestamp " &
      "is greater than the given epoch time and the earliest block is not " &
      "block number 0 (genesis block)":
    let mockProvider = newMockProvider(
      numberOfBlocks = 10,
      earliestBlockNumber = 1,
      earliestBlockTimestamp = 10,
      timeIntervalBetweenBlocks = 10
    )

    let (earliestBlockNumber, earliestTimestamp) =
      await mockProvider.blockNumberAndTimestamp(BlockTag.earliest)

    let epochTime = earliestTimestamp - 1

    let actual = await mockProvider.blockNumberForEpoch(
      epochTime.truncate(SecondsSince1970))

    check actual == earliestBlockNumber

  test "blockNumberForEpoch returns the earliest block when its timestamp " &
      "is equal to the given epoch time":
    let mockProvider = newMockProvider(
      numberOfBlocks = 10,
      earliestBlockNumber = 0,
      earliestBlockTimestamp = 10,
      timeIntervalBetweenBlocks = 10
    )

    let (earliestBlockNumber, earliestTimestamp) =
      await mockProvider.blockNumberAndTimestamp(BlockTag.earliest)

    let epochTime = earliestTimestamp

    let actual = await mockProvider.blockNumberForEpoch(
      epochTime.truncate(SecondsSince1970))

    check earliestBlockNumber == 0.u256
    check actual == earliestBlockNumber

  test "blockNumberForEpoch returns the latest block when its timestamp " &
      "is equal to the given epoch time":
    let mockProvider = newMockProvider(
      numberOfBlocks = 10,
      earliestBlockNumber = 0,
      earliestBlockTimestamp = 10,
      timeIntervalBetweenBlocks = 10
    )

    let (latestBlockNumber, latestTimestamp) =
      await mockProvider.blockNumberAndTimestamp(BlockTag.latest)

    let epochTime = latestTimestamp

    let actual = await mockProvider.blockNumberForEpoch(
      epochTime.truncate(SecondsSince1970))

    check actual == latestBlockNumber

ethersuite "Provider":
  proc mineNBlocks(provider: JsonRpcProvider, n: int) {.async.} =
    for _ in 0..<n:
      discard await provider.send("evm_mine")

  test "blockNumberForEpoch finds closest blockNumber for given epoch time":
    proc createBlockHistory(n: int, blockTime: int):
        Future[seq[(UInt256, UInt256)]] {.async.} =
      var blocks: seq[(UInt256, UInt256)] = @[]
      for _ in 0..<n:
        await ethProvider.advanceTime(blockTime.u256)
        let (blockNumber, blockTimestamp) =
          await ethProvider.blockNumberAndTimestamp(BlockTag.latest)
        # collect block numbers and timestamps
        blocks.add((blockNumber, blockTimestamp))
      blocks

    proc printBlockNumbersAndTimestamps(blocks: seq[(UInt256, UInt256)]) =
      for (blockNumber, blockTimestamp) in blocks:
        debug "Block", blockNumber = blockNumber, timestamp = blockTimestamp

    type Expectations = tuple
      epochTime: UInt256
      expectedBlockNumber: UInt256

    # We want to test that timestamps at the block boundaries, in the middle,
    # and towards the lower and upper parts of the range are correctly mapped
    # to the closest block number.
    # For example: assume we have the following two blocks with
    # the corresponding block numbers and timestamps:
    # block1: (291, 1728436100)
    # block2: (292, 1728436110)
    # To test that binary search correctly finds the closest block number,
    # we will test the following timestamps:
    # 1728436100 => 291
    # 1728436104 => 291
    # 1728436105 => 292
    # 1728436106 => 292
    # 1728436110 => 292
    proc generateExpectations(
        blocks: seq[(UInt256, UInt256)]): seq[Expectations] =
      var expectations: seq[Expectations] = @[]
      for i in 0..<blocks.len - 1:
        let (startNumber, startTimestamp) = blocks[i]
        let (endNumber, endTimestamp) = blocks[i + 1]
        let middleTimestamp = (startTimestamp + endTimestamp) div 2
        let lowerExpectation = (middleTimestamp - 1, startNumber)
        expectations.add((startTimestamp, startNumber))
        expectations.add(lowerExpectation)
        if middleTimestamp.truncate(int64) - startTimestamp.truncate(int64) <
            endTimestamp.truncate(int64) - middleTimestamp.truncate(int64):
          expectations.add((middleTimestamp, startNumber))
        else:
          expectations.add((middleTimestamp, endNumber))
        let higherExpectation = (middleTimestamp + 1, endNumber)
        expectations.add(higherExpectation)
        if i == blocks.len - 2:
          expectations.add((endTimestamp, endNumber))
      expectations

    proc printExpectations(expectations: seq[Expectations]) =
      debug "Expectations", numberOfExpectations = expectations.len
      for (epochTime, expectedBlockNumber) in expectations:
        debug "Expectation", epochTime = epochTime,
          expectedBlockNumber = expectedBlockNumber

    # mark the beginning of the history for our test
    await ethProvider.mineNBlocks(1)

    # set average block time - 10 s - we use a larger block time
    # than expected in Linea for more precise testing of the binary search
    let averageBlockTime = 10

    # create a history of N blocks
    let N = 10
    let blocks = await createBlockHistory(N, averageBlockTime)

    printBlockNumbersAndTimestamps(blocks)

    # generate expectations for block numbers
    let expectations = generateExpectations(blocks)
    printExpectations(expectations)

    # validate expectations
    for (epochTime, expectedBlockNumber) in expectations:
      debug "Validating", epochTime = epochTime,
        expectedBlockNumber = expectedBlockNumber
      let actualBlockNumber = await ethProvider.blockNumberForEpoch(
        epochTime.truncate(SecondsSince1970))
      check actualBlockNumber == expectedBlockNumber
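
# `blockNumberForEpoch` itself is not part of this diff. A sketch of the
# binary search that the expectation table above describes, assuming the
# `blockNumberAndTimestamp` helper used in this test and a `BlockTag.init`
# constructor for numeric tags; an illustration, not the committed code:
proc blockNumberForEpochSketch(provider: Provider,
                               epochTime: SecondsSince1970): Future[UInt256] {.async.} =
  let epoch = epochTime.u256
  var (low, lowTs) = await provider.blockNumberAndTimestamp(BlockTag.earliest)
  var (high, highTs) = await provider.blockNumberAndTimestamp(BlockTag.latest)
  if epoch <= lowTs: return low
  if epoch >= highTs: return high
  while low + 1 < high:
    let mid = (low + high) div 2
    let (_, midTs) = await provider.blockNumberAndTimestamp(BlockTag.init(mid))
    if midTs <= epoch:
      low = mid
      lowTs = midTs
    else:
      high = mid
      highTs = midTs
  # low and high now straddle the epoch; ties go to the later block,
  # matching 1728436105 => 292 in the expectations above
  if epoch - lowTs < highTs - epoch:
    return low
  return high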

@@ -20,9 +20,11 @@ type CodexClient* = ref object

type CodexClientError* = object of CatchableError

const HttpClientTimeoutMs = 60 * 1000

proc new*(_: type CodexClient, baseurl: string): CodexClient =
  CodexClient(
    http: newHttpClient(),
    http: newHttpClient(timeout=HttpClientTimeoutMs),
    baseurl: baseurl,
    session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
  )

@@ -247,7 +249,7 @@ proc close*(client: CodexClient) =

proc restart*(client: CodexClient) =
  client.http.close()
  client.http = newHttpClient()
  client.http = newHttpClient(timeout=HttpClientTimeoutMs)

proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool =
  client.getPurchase(id).option.?state == some state

@@ -239,6 +239,51 @@ proc withSimulateProofFailures*(
      StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
  return startConfig

proc withValidationGroups*(
  self: CodexConfigs,
  groups: ValidationGroups): CodexConfigs {.raises: [CodexConfigError].} =

  var startConfig = self
  for config in startConfig.configs.mitems:
    config.addCliOption(
      StartUpCmd.persistence, "--validator-groups", $(groups))
  return startConfig

proc withValidationGroupIndex*(
  self: CodexConfigs,
  idx: int,
  groupIndex: uint16): CodexConfigs {.raises: [CodexConfigError].} =

  self.checkBounds idx

  var startConfig = self
  startConfig.configs[idx].addCliOption(
    StartUpCmd.persistence, "--validator-group-index", $groupIndex)
  return startConfig

proc withEthProvider*(
  self: CodexConfigs,
  idx: int,
  ethProvider: string
): CodexConfigs {.raises: [CodexConfigError].} =

  self.checkBounds idx

  var startConfig = self
  startConfig.configs[idx].addCliOption(StartUpCmd.persistence,
    "--eth-provider", ethProvider)
  return startConfig

proc withEthProvider*(
  self: CodexConfigs,
  ethProvider: string): CodexConfigs {.raises: [CodexConfigError].} =

  var startConfig = self
  for config in startConfig.configs.mitems:
    config.addCliOption(StartUpCmd.persistence,
      "--eth-provider", ethProvider)
  return startConfig

proc logLevelWithTopics(
  config: CodexConfig,
  topics: varargs[string]): string {.raises: [CodexConfigError].} =
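
# The with* modifiers above copy `self`, append a CLI option and return the
# copy, so they chain when declaring a suite's node layout. A hypothetical
# example (the group values and the ValidationGroups conversion are
# placeholders, not taken from this diff):
let validators = CodexConfigs
  .init(nodes = 2)
  .withValidationGroups(groups = ValidationGroups(2))
  .withValidationGroupIndex(idx = 0, groupIndex = 0)
  .withValidationGroupIndex(idx = 1, groupIndex = 1)
  .withEthProvider("http://127.0.0.1:8545")
  .some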

@@ -34,10 +34,10 @@ method startedOutput(node: CodexProcess): string =
method processOptions(node: CodexProcess): set[AsyncProcessOption] =
  return {AsyncProcessOption.StdErrToStdOut}

method outputLineEndings(node: CodexProcess): string =
method outputLineEndings(node: CodexProcess): string {.raises: [].} =
  return "\n"

method onOutputLineCaptured(node: CodexProcess, line: string) =
method onOutputLineCaptured(node: CodexProcess, line: string) {.raises: [].} =
  discard

proc dataDir(node: CodexProcess): string =

@@ -37,7 +37,7 @@ method startedOutput(node: HardhatProcess): string =
method processOptions(node: HardhatProcess): set[AsyncProcessOption] =
  return {}

method outputLineEndings(node: HardhatProcess): string =
method outputLineEndings(node: HardhatProcess): string {.raises: [].} =
  return "\n"

proc openLogFile(node: HardhatProcess, logFilePath: string): IoHandle =

@@ -11,7 +11,8 @@ import ../contracts/deployment
export mp
export multinodes

template marketplacesuite*(name: string, body: untyped) =
template marketplacesuite*(name: string,
    body: untyped) =

  multinodesuite name:

@@ -61,9 +61,29 @@ proc nextFreePort(startPort: int): Future[int] {.async.} =
template multinodesuite*(name: string, body: untyped) =

  asyncchecksuite name:

    var running: seq[RunningNode]
    var bootstrap: string
    # Following the problem described here:
    # https://github.com/NomicFoundation/hardhat/issues/2053
    # it may be desirable to use an http RPC provider.
    # This turns out to be equally important in tests where
    # subscriptions get wiped out after 5 minutes even when
    # a new block is mined.
    # For this reason, we are using the http provider here as the default.
    # To use a different provider in your test, you may use the
    # multinodesuiteWithProviderUrl template in your tests.
    # If you want to use a different provider url in the nodes, you can
    # use the withEthProvider config modifier in the node config
    # to set the desired provider url, e.g.:
    #   NodeConfigs(
    #     hardhat:
    #       HardhatConfig.none,
    #     clients:
    #       CodexConfigs.init(nodes=1)
    #         .withEthProvider("ws://localhost:8545")
    #         .some,
    #     ...
    let jsonRpcProviderUrl = "http://127.0.0.1:8545"
    var running {.inject, used.}: seq[RunningNode]
    var bootstrapNodes: seq[string]
    let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss")
    var currentTestName = ""
    var nodeConfigs: NodeConfigs

@@ -142,6 +162,8 @@ template multinodesuite*(name: string, body: untyped) =
      let updatedLogFile = getLogFile(role, some roleIdx)
      config.withLogFile(updatedLogFile)

      for bootstrapNode in bootstrapNodes:
        config.addCliOption("--bootstrap-node", bootstrapNode)
      config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx))
      config.addCliOption("--data-dir", datadir)
      config.addCliOption("--nat", "127.0.0.1")

@@ -196,15 +218,14 @@ template multinodesuite*(name: string, body: untyped) =
    proc startClientNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
      let clientIdx = clients().len
      var config = conf
      config.addCliOption(StartUpCmd.persistence, "--eth-provider", "http://127.0.0.1:8545")
      config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl)
      config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
      return await newCodexProcess(clientIdx, config, Role.Client)

    proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
      let providerIdx = providers().len
      var config = conf
      config.addCliOption("--bootstrap-node", bootstrap)
      config.addCliOption(StartUpCmd.persistence, "--eth-provider", "http://127.0.0.1:8545")
      config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl)
      config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
      config.addCliOption(PersistenceCmd.prover, "--circom-r1cs",
        "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs")

@@ -218,8 +239,7 @@ template multinodesuite*(name: string, body: untyped) =
    proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
      let validatorIdx = validators().len
      var config = conf
      config.addCliOption("--bootstrap-node", bootstrap)
      config.addCliOption(StartUpCmd.persistence, "--eth-provider", "http://127.0.0.1:8545")
      config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl)
      config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
      config.addCliOption(StartUpCmd.persistence, "--validator")

@@ -253,6 +273,13 @@ template multinodesuite*(name: string, body: untyped) =
        fail()
        quit(1)

    proc updateBootstrapNodes(node: CodexProcess) =
      without ninfo =? node.client.info():
        # raise CatchableError instead of Defect (with .get or !) so we
        # can gracefully shutdown and prevent zombies
        raiseMultiNodeSuiteError "Failed to get node info"
      bootstrapNodes.add ninfo["spr"].getStr()

    setup:
      if var conf =? nodeConfigs.hardhat:
        try:

@@ -268,7 +295,7 @@ template multinodesuite*(name: string, body: untyped) =
      # Do not use websockets, but use http and polling to stop subscriptions
      # from being removed after 5 minutes
      ethProvider = JsonRpcProvider.new(
        "http://127.0.0.1:8545",
        jsonRpcProviderUrl,
        pollingInterval = chronos.milliseconds(100)
      )
      # if hardhat was NOT started by the test, take a snapshot so it can be

@@ -291,12 +318,7 @@ template multinodesuite*(name: string, body: untyped) =
          role: Role.Client,
          node: node
        )
        if clients().len == 1:
          without ninfo =? CodexProcess(node).client.info():
            # raise CatchableError instead of Defect (with .get or !) so we
            # can gracefully shutdown and prevent zombies
            raiseMultiNodeSuiteError "Failed to get node info"
          bootstrap = ninfo["spr"].getStr()
        CodexProcess(node).updateBootstrapNodes()

      if var providers =? nodeConfigs.providers:
        failAndTeardownOnError "failed to start provider nodes":

@@ -306,6 +328,7 @@ template multinodesuite*(name: string, body: untyped) =
          role: Role.Provider,
          node: node
        )
        CodexProcess(node).updateBootstrapNodes()

      if var validators =? nodeConfigs.validators:
        failAndTeardownOnError "failed to start validator nodes":

@@ -38,10 +38,10 @@ method startedOutput(node: NodeProcess): string {.base, gcsafe.} =
method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base, gcsafe.} =
  raiseAssert "not implemented"

method outputLineEndings(node: NodeProcess): string {.base, gcsafe.} =
method outputLineEndings(node: NodeProcess): string {.base, gcsafe, raises: [].} =
  raiseAssert "not implemented"

method onOutputLineCaptured(node: NodeProcess, line: string) {.base, gcsafe.} =
method onOutputLineCaptured(node: NodeProcess, line: string) {.base, gcsafe, raises: [].} =
  raiseAssert "not implemented"

method start*(node: NodeProcess) {.base, async.} =

@@ -74,7 +74,7 @@ proc captureOutput(
  node: NodeProcess,
  output: string,
  started: Future[void]
) {.async.} =
) {.async: (raises: []).} =

  logScope:
    nodeName = node.name

@@ -98,7 +98,10 @@ proc captureOutput(
        await sleepAsync(1.millis)
      await sleepAsync(1.millis)

  except AsyncStreamReadError as e:
  except CancelledError:
    discard # do not propagate as captureOutput was asyncSpawned

  except AsyncStreamError as e:
    error "error reading output stream", error = e.msgDetail

proc startNode*[T: NodeProcess](

@@ -147,16 +150,22 @@ method stop*(node: NodeProcess) {.base, async.} =

  trace "node stopped"

proc waitUntilStarted*(node: NodeProcess) {.async.} =
proc waitUntilOutput*(node: NodeProcess, output: string) {.async.} =
  logScope:
    nodeName = node.name

  trace "waiting until node started"
  trace "waiting until", output

  let started = newFuture[void]()
  let fut = node.captureOutput(output, started)
  node.trackedFutures.track(fut)
  asyncSpawn fut
  await started.wait(60.seconds) # allow enough time for proof generation

proc waitUntilStarted*(node: NodeProcess) {.async.} =
  try:
    discard node.captureOutput(node.startedOutput, started).track(node)
    await started.wait(35.seconds) # allow enough time for proof generation
    await node.waitUntilOutput(node.startedOutput)
    trace "node started"
  except AsyncTimeoutError:
    # attempt graceful shutdown in case node was partially started, prevent
    # zombies

@@ -1,91 +0,0 @@
import std/osproc
import std/os
import std/streams
import std/strutils
import pkg/codex/conf
import pkg/codex/logutils
import pkg/confutils
import pkg/libp2p
import pkg/questionable
import ./codexclient

export codexclient

const workingDir = currentSourcePath() / ".." / ".." / ".."
const executable = "build" / "codex"

type
  NodeProcess* = ref object
    process: Process
    arguments: seq[string]
    debug: bool
    client: ?CodexClient

proc start(node: NodeProcess) =
  if node.debug:
    node.process = osproc.startProcess(
      executable,
      workingDir,
      node.arguments,
      options={poParentStreams}
    )
  else:
    node.process = osproc.startProcess(
      executable,
      workingDir,
      node.arguments
    )

proc waitUntilOutput*(node: NodeProcess, output: string) =
  if node.debug:
    raiseAssert "cannot read node output when in debug mode"
  for line in node.process.outputStream.lines:
    if line.contains(output):
      return
  raiseAssert "node did not output '" & output & "'"

proc waitUntilStarted*(node: NodeProcess) =
  if node.debug:
    sleep(10_000)
  else:
    node.waitUntilOutput("Started codex node")

proc startNode*(args: openArray[string], debug: string | bool = false): NodeProcess =
  ## Starts a Codex Node with the specified arguments.
  ## Set debug to 'true' to see output of the node.
  let node = NodeProcess(arguments: @args, debug: ($debug != "false"))
  node.start()
  node

proc dataDir(node: NodeProcess): string =
  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
  config.dataDir.string

proc apiUrl(node: NodeProcess): string =
  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
  "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

proc client*(node: NodeProcess): CodexClient =
  if client =? node.client:
    return client
  let client = CodexClient.new(node.apiUrl)
  node.client = some client
  client

proc stop*(node: NodeProcess) =
  if node.process != nil:
    node.process.terminate()
    discard node.process.waitForExit(timeout=5_000)
    node.process.close()
    node.process = nil
  if client =? node.client:
    node.client = none CodexClient
    client.close()

proc restart*(node: NodeProcess) =
  node.stop()
  node.start()
  node.waitUntilStarted()

proc removeDataDir*(node: NodeProcess) =
  removeDir(node.dataDir)

@@ -5,10 +5,11 @@ from std/net import TimeoutError

import pkg/chronos
import ../ethertest
import ./nodes
import ./codexprocess
import ./nodeprocess

ethersuite "Node block expiration tests":
  var node: NodeProcess
  var node: CodexProcess
  var baseurl: string

  let dataDir = getTempDir() / "Codex1"

@@ -18,12 +19,12 @@ ethersuite "Node block expiration tests":
    baseurl = "http://localhost:8080/api/codex/v1"

  teardown:
    node.stop()
    await node.stop()

    dataDir.removeDir()

  proc startTestNode(blockTtlSeconds: int) =
    node = startNode([
  proc startTestNode(blockTtlSeconds: int) {.async.} =
    node = await CodexProcess.startNode(@[
      "--api-port=8080",
      "--data-dir=" & dataDir,
      "--nat=127.0.0.1",

@@ -32,9 +33,11 @@ ethersuite "Node block expiration tests":
      "--disc-port=8090",
      "--block-ttl=" & $blockTtlSeconds,
      "--block-mi=1",
      "--block-mn=10"
    ], debug = false)
    node.waitUntilStarted()
      "--block-mn=10"],
      false,
      "cli-test-node"
    )
    await node.waitUntilStarted()

  proc uploadTestFile(): string =
    let client = newHttpClient()

@@ -61,7 +64,7 @@ ethersuite "Node block expiration tests":
    content.code == Http200

  test "node retains not-expired file":
    startTestNode(blockTtlSeconds = 10)
    await startTestNode(blockTtlSeconds = 10)

    let contentId = uploadTestFile()

@@ -74,7 +77,7 @@ ethersuite "Node block expiration tests":
    response.body == content

  test "node deletes expired file":
    startTestNode(blockTtlSeconds = 1)
    await startTestNode(blockTtlSeconds = 1)

    let contentId = uploadTestFile()

@@ -1,29 +1,38 @@
import std/unittest
import std/tempfiles
import codex/conf
import codex/utils/fileutils
import ./nodes
import ../asynctest
import ../checktest
import ./codexprocess
import ./nodeprocess
import ../examples

suite "Command line interface":
asyncchecksuite "Command line interface":

  let key = "4242424242424242424242424242424242424242424242424242424242424242"

  proc startCodex(args: seq[string]): Future[CodexProcess] {.async.} =
    return await CodexProcess.startNode(
      args,
      false,
      "cli-test-node"
    )

  test "complains when persistence is enabled without ethereum account":
    let node = startNode(@[
    let node = await startCodex(@[
      "persistence"
    ])
    node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
    node.stop()
    await node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
    await node.stop()

  test "complains when ethereum private key file has wrong permissions":
    let unsafeKeyFile = genTempPath("", "")
    discard unsafeKeyFile.writeFile(key, 0o666)
    let node = startNode(@[
    let node = await startCodex(@[
      "persistence",
      "--eth-private-key=" & unsafeKeyFile])
    node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
    node.stop()
    await node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
    await node.stop()
    discard removeFile(unsafeKeyFile)

  let

@@ -31,27 +40,27 @@ suite "Command line interface":
    expectedDownloadInstruction = "Proving circuit files are not found. Please run the following to download them:"

  test "suggests downloading of circuit files when persistence is enabled without accessible r1cs file":
    let node = startNode(@["persistence", "prover", marketplaceArg])
    node.waitUntilOutput(expectedDownloadInstruction)
    node.stop()
    let node = await startCodex(@["persistence", "prover", marketplaceArg])
    await node.waitUntilOutput(expectedDownloadInstruction)
    await node.stop()

  test "suggests downloading of circuit files when persistence is enabled without accessible wasm file":
    let node = startNode(@[
    let node = await startCodex(@[
      "persistence",
      "prover",
      marketplaceArg,
      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs"
    ])
    node.waitUntilOutput(expectedDownloadInstruction)
    node.stop()
    await node.waitUntilOutput(expectedDownloadInstruction)
    await node.stop()

  test "suggests downloading of circuit files when persistence is enabled without accessible zkey file":
    let node = startNode(@[
    let node = await startCodex(@[
      "persistence",
      "prover",
      marketplaceArg,
      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
      "--circom-wasm=tests/circuits/fixtures/proof_main.wasm"
    ])
    node.waitUntilOutput(expectedDownloadInstruction)
    node.stop()
    await node.waitUntilOutput(expectedDownloadInstruction)
    await node.stop()

@@ -33,12 +33,9 @@ marketplacesuite "Bug #821 - node crashes during erasure coding":
    let cid = clientApi.upload(data).get

    var requestId = none RequestId
    proc onStorageRequested(event: ?!StorageRequested) {.raises: [].} =
      without value =? event:
        trace "The onSlotFilled event is not defined."
        discard

      requestId = value.requestId.some
    proc onStorageRequested(eventResult: ?!StorageRequested) =
      assert not eventResult.isErr
      requestId = some (!eventResult).requestId

    let subscription = await marketplace.subscribe(StorageRequested, onStorageRequested)
|
||||
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
import pkg/stew/byteutils
|
||||
import pkg/codex/units
|
||||
import ../examples
|
||||
import ../contracts/time
|
||||
import ../contracts/deployment
|
||||
|
@ -7,22 +5,37 @@ import ./marketplacesuite
|
|||
import ./twonodes
|
||||
import ./nodeconfigs
|
||||
|
||||
twonodessuite "Marketplace", debug1 = false, debug2 = false:
|
||||
marketplacesuite "Marketplace":
|
||||
let marketplaceConfig = NodeConfigs(
|
||||
clients: CodexConfigs.init(nodes=1).some,
|
||||
providers: CodexConfigs.init(nodes=1).some,
|
||||
)
|
||||
|
||||
var host: CodexClient
|
||||
var hostAccount: Address
|
||||
var client: CodexClient
|
||||
var clientAccount: Address
|
||||
|
||||
setup:
|
||||
host = providers()[0].client
|
||||
hostAccount = providers()[0].ethAccount
|
||||
client = clients()[0].client
|
||||
clientAccount = clients()[0].ethAccount
|
||||
|
||||
# Our Hardhat configuration does use automine, which means that time tracked by `ethProvider.currentTime()` is not
|
||||
# advanced until blocks are mined and that happens only when transaction is submitted.
|
||||
# As we use in tests ethProvider.currentTime() which uses block timestamp this can lead to synchronization issues.
|
||||
await ethProvider.advanceTime(1.u256)
|
||||
|
||||
test "nodes negotiate contracts on the marketplace":
|
||||
test "nodes negotiate contracts on the marketplace", marketplaceConfig:
|
||||
let size = 0xFFFFFF.u256
|
||||
let data = await RandomChunker.example(blocks=8)
|
||||
# client 2 makes storage available
|
||||
let availability = client2.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
|
||||
# host makes storage available
|
||||
let availability = host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
|
||||
|
||||
# client 1 requests storage
|
||||
let cid = client1.upload(data).get
|
||||
let id = client1.requestStorage(
|
||||
# client requests storage
|
||||
let cid = client.upload(data).get
|
||||
let id = client.requestStorage(
|
||||
cid,
|
||||
duration=20*60.u256,
|
||||
reward=400.u256,
|
||||
@@ -32,19 +45,19 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
       nodes = 3,
       tolerance = 1).get

-    check eventually(client1.purchaseStateIs(id, "started"), timeout=10*60*1000)
-    let purchase = client1.getPurchase(id).get
+    check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000)
+    let purchase = client.getPurchase(id).get
     check purchase.error == none string
-    let availabilities = client2.getAvailabilities().get
+    let availabilities = host.getAvailabilities().get
     check availabilities.len == 1
     let newSize = availabilities[0].freeSize
     check newSize > 0 and newSize < size

-    let reservations = client2.getAvailabilityReservations(availability.id).get
+    let reservations = host.getAvailabilityReservations(availability.id).get
     check reservations.len == 3
     check reservations[0].requestId == purchase.requestId

-  test "node slots gets paid out and rest of tokens are returned to client":
+  test "node slots gets paid out and rest of tokens are returned to client", marketplaceConfig:
     let size = 0xFFFFFF.u256
     let data = await RandomChunker.example(blocks = 8)
     let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner())
@@ -54,13 +67,13 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
     let duration = 20*60.u256
     let nodes = 3'u

-    # client 2 makes storage available
-    let startBalanceHost = await token.balanceOf(account2)
-    discard client2.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    # host makes storage available
+    let startBalanceHost = await token.balanceOf(hostAccount)
+    discard host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get

-    # client 1 requests storage
-    let cid = client1.upload(data).get
-    let id = client1.requestStorage(
+    # client requests storage
+    let cid = client.upload(data).get
+    let id = client.requestStorage(
       cid,
       duration=duration,
       reward=reward,
@@ -70,11 +83,11 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
       nodes = nodes,
       tolerance = 1).get

-    check eventually(client1.purchaseStateIs(id, "started"), timeout=10*60*1000)
-    let purchase = client1.getPurchase(id).get
+    check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000)
+    let purchase = client.getPurchase(id).get
     check purchase.error == none string

-    let clientBalanceBeforeFinished = await token.balanceOf(account1)
+    let clientBalanceBeforeFinished = await token.balanceOf(clientAccount)

     # Proving mechanism uses blockchain clock to do proving/collect/cleanup round
     # hence we must use `advanceTime` over `sleepAsync` as Hardhat does mine new blocks
@@ -82,11 +95,11 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
     await ethProvider.advanceTime(duration)

     # Checking that the hosting node received reward for at least the time between <expiry;end>
-    check eventually (await token.balanceOf(account2)) - startBalanceHost >= (duration-5*60)*reward*nodes.u256
+    check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= (duration-5*60)*reward*nodes.u256

     # Checking that client node receives some funds back that were not used for the host nodes
     check eventually(
-      (await token.balanceOf(account1)) - clientBalanceBeforeFinished > 0,
+      (await token.balanceOf(clientAccount)) - clientBalanceBeforeFinished > 0,
       timeout = 10*1000 # give client a bit of time to withdraw its funds
     )
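To make the payout lower bound in the checks above concrete: with duration = 20*60 = 1200 seconds, a 5-minute expiry window, and 3 nodes (the per-slot `reward` is set in a part of the test this hunk does not show; 400 is assumed here, matching the first test):

# Worked example of the bound `(duration-5*60)*reward*nodes.u256` above.
let duration = 1200'u64       # 20 minutes, in seconds
let expiryWindow = 300'u64    # 5 minutes, in seconds
let reward = 400'u64          # assumed per-slot, per-second reward
let nodes = 3'u64
assert (duration - expiryWindow) * reward * nodes == 1_080_000'u64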
@@ -135,13 +148,9 @@ marketplacesuite "Marketplace payouts":
     let cid = clientApi.upload(data).get

     var slotIdxFilled = none UInt256

-    let onSlotFilled = proc (event: ?!SlotFilled) =
-      without value =? event:
-        trace "The onSlotFilled event is not defined."
-        discard
-
-      slotIdxFilled = value.slotIndex.some
+    proc onSlotFilled(eventResult: ?!SlotFilled) =
+      assert not eventResult.isErr
+      slotIdxFilled = some (!eventResult).slotIndex

     let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled)

@@ -164,6 +173,8 @@ marketplacesuite "Marketplace payouts":
     await ethProvider.advanceTime(expiry.u256)
+    check eventually providerApi.saleStateIs(slotId, "SaleCancelled")

+    await advanceToNextPeriod()

     check eventually (
       let endBalanceProvider = (await token.balanceOf(provider.ethAccount));
       endBalanceProvider > startBalanceProvider and
--- a/tests/integration/testproofs.nim
+++ b/tests/integration/testproofs.nim
@@ -1,4 +1,5 @@
+from std/times import inMilliseconds
 import pkg/questionable
 import pkg/codex/logutils
 import pkg/stew/byteutils
 import ../contracts/time
@@ -55,7 +56,7 @@ marketplacesuite "Hosts submit regular proofs":

     var proofWasSubmitted = false
     proc onProofSubmitted(event: ?!ProofSubmitted) =
-      proofWasSubmitted = true
+      proofWasSubmitted = event.isOk

     let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted)

@@ -121,11 +122,7 @@ marketplacesuite "Simulate invalid proofs":

     var slotWasFreed = false
     proc onSlotFreed(event: ?!SlotFreed) =
-      without value =? event:
-        trace "The onSlotFreed event is not defined."
-        discard
-
-      if value.requestId == requestId:
+      if event.isOk and event.value.requestId == requestId:
         slotWasFreed = true

     let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
@@ -180,12 +177,11 @@ marketplacesuite "Simulate invalid proofs":
     let requestId = client0.requestId(purchaseId).get

     var slotWasFilled = false
-    proc onSlotFilled(event: ?!SlotFilled) =
-      without value =? event:
-        trace "The onSlotFilled event is not defined."
-        discard
+    proc onSlotFilled(eventResult: ?!SlotFilled) =
+      assert not eventResult.isErr
+      let event = !eventResult

-      if value.requestId == requestId:
+      if event.requestId == requestId:
         slotWasFilled = true
     let filledSubscription = await marketplace.subscribe(SlotFilled, onSlotFilled)

@@ -194,11 +190,7 @@ marketplacesuite "Simulate invalid proofs":

     var slotWasFreed = false
     proc onSlotFreed(event: ?!SlotFreed) =
-      without value =? event:
-        trace "The onSlotFreed event is not defined."
-        discard
-
-      if value.requestId == requestId:
+      if event.isOk and event.value.requestId == requestId:
         slotWasFreed = true

     let freedSubscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
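Every event callback in this file receives a `?!T`, the Result alias from pkg/questionable/results. The old code unwrapped it with `without value =? event:`; the refactor asserts success and force-unwraps with `!`. A minimal standalone sketch of both forms (illustrative, not code from the repository):

import pkg/questionable/results

proc handleWithout(res: ?!int) =
  # `without` binds the success value, or runs the else-branch with `error`.
  without value =? res, error:
    echo "no value: ", error.msg
    return
  echo "got ", value

proc handleAsserting(res: ?!int) =
  # Check first, then force-unwrap with `!` (raises a Defect on error).
  assert not res.isErr
  echo "got ", !res

handleWithout(success(42))          # got 42
handleWithout(int.failure "boom")   # no value: boom
handleAsserting(success(7))         # got 7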
--- a/tests/integration/testpurchasing.nim
+++ b/tests/integration/testpurchasing.nim
@@ -5,16 +5,16 @@ import ./twonodes
 import ../contracts/time
 import ../examples

-twonodessuite "Purchasing", debug1 = false, debug2 = false:
+twonodessuite "Purchasing":

-  test "node handles storage request":
+  test "node handles storage request", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
     let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
     check id1 != id2

-  test "node retrieves purchase status":
+  test "node retrieves purchase status", twoNodesConfig:
     # get one contiguous chunk
     let rng = rng.Rng.instance()
     let chunker = RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2)
@@ -40,7 +40,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.maxSlotLoss == 1'u64

   # TODO: We currently do not support encoding single chunks
-  # test "node retrieves purchase status with 1 chunk":
+  # test "node retrieves purchase status with 1 chunk", twoNodesConfig:
   #   let cid = client1.upload("some file contents").get
   #   let id = client1.requestStorage(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, expiry=30, collateral=200.u256, nodes=2, tolerance=1).get
   #   let request = client1.getPurchase(id).get.request.get
@@ -52,7 +52,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
   #   check request.ask.slots == 3'u64
   #   check request.ask.maxSlotLoss == 1'u64

-  test "node remembers purchase status after restart":
+  test "node remembers purchase status after restart", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let id = client1.requestStorage(cid,
@@ -65,7 +65,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
                                     tolerance=1.uint).get
     check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000)

-    node1.restart()
+    await node1.restart()
     client1.restart()

     check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000)
@@ -78,7 +78,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.slots == 3'u64
     check request.ask.maxSlotLoss == 1'u64

-  test "node requires expiry and its value to be in future":
+  test "node requires expiry and its value to be in future", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get

--- a/tests/integration/testrestapi.nim
+++ b/tests/integration/testrestapi.nim
@@ -6,20 +6,20 @@ import ./twonodes
 import ../examples
 import json

-twonodessuite "REST API", debug1 = false, debug2 = false:
-  test "nodes can print their peer information":
+twonodessuite "REST API":
+  test "nodes can print their peer information", twoNodesConfig:
     check !client1.info() != !client2.info()

-  test "nodes can set chronicles log level":
+  test "nodes can set chronicles log level", twoNodesConfig:
     client1.setLogLevel("DEBUG;TRACE:codex")

-  test "node accepts file uploads":
+  test "node accepts file uploads", twoNodesConfig:
     let cid1 = client1.upload("some file contents").get
     let cid2 = client1.upload("some other contents").get

     check cid1 != cid2

-  test "node shows used and available space":
+  test "node shows used and available space", twoNodesConfig:
     discard client1.upload("some file contents").get
     discard client1.postAvailability(totalSize=12.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
     let space = client1.space().tryGet()
@@ -29,7 +29,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
       space.quotaUsedBytes == 65598.NBytes
       space.quotaReservedBytes == 12.NBytes

-  test "node lists local files":
+  test "node lists local files", twoNodesConfig:
     let content1 = "some file contents"
     let content2 = "some other contents"

@@ -40,7 +40,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check:
       [cid1, cid2].allIt(it in list.content.mapIt(it.cid))

-  test "request storage fails for datasets that are too small":
+  test "request storage fails for datasets that are too small", twoNodesConfig:
     let cid = client1.upload("some file contents").get
     let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)

@@ -48,7 +48,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
       response.status == "400 Bad Request"
       response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"

-  test "request storage succeeds for sufficiently sized datasets":
+  test "request storage succeeds for sufficiently sized datasets", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
@@ -56,7 +56,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check:
       response.status == "200 OK"

-  test "request storage fails if tolerance is zero":
+  test "request storage fails if tolerance is zero", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -79,7 +79,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check responseBefore.status == "400 Bad Request"
     check responseBefore.body == "Tolerance needs to be bigger then zero"

-  test "request storage fails if nodes and tolerance aren't correct":
+  test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -104,7 +104,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check responseBefore.status == "400 Bad Request"
     check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`"

-  test "request storage fails if tolerance > nodes (underflow protection)":
+  test "request storage fails if tolerance > nodes (underflow protection)", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -129,7 +129,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check responseBefore.status == "400 Bad Request"
     check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"

-  test "request storage succeeds if nodes and tolerance within range":
+  test "request storage succeeds if nodes and tolerance within range", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -153,42 +153,42 @@ twonodessuite "REST API", debug1 = false, debug2 = false:

     check responseBefore.status == "200 OK"

-  test "node accepts file uploads with content type":
+  test "node accepts file uploads with content type", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Type": "text/plain"})
     let response = client1.uploadRaw("some file contents", headers)

     check response.status == "200 OK"
     check response.body != ""

-  test "node accepts file uploads with content disposition":
+  test "node accepts file uploads with content disposition", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""})
     let response = client1.uploadRaw("some file contents", headers)

     check response.status == "200 OK"
     check response.body != ""

-  test "node accepts file uploads with content disposition without filename":
+  test "node accepts file uploads with content disposition without filename", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Disposition": "attachment"})
     let response = client1.uploadRaw("some file contents", headers)

     check response.status == "200 OK"
     check response.body != ""

-  test "upload fails if content disposition contains bad filename":
+  test "upload fails if content disposition contains bad filename", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""})
     let response = client1.uploadRaw("some file contents", headers)

     check response.status == "422 Unprocessable Entity"
     check response.body == "The filename is not valid."

-  test "upload fails if content type is invalid":
+  test "upload fails if content type is invalid", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Type": "hello/world"})
     let response = client1.uploadRaw("some file contents", headers)

     check response.status == "422 Unprocessable Entity"
     check response.body == "The MIME type is not valid."

-  test "node retrieve the metadata":
+  test "node retrieve the metadata", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Type": "text/plain", "Content-Disposition": "attachment; filename=\"example.txt\""})
     let uploadResponse = client1.uploadRaw("some file contents", headers)
     let cid = uploadResponse.body
@@ -211,7 +211,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check manifest.hasKey("uploadedAt") == true
     check manifest["uploadedAt"].getInt() > 0

-  test "node set the headers when for download":
+  test "node set the headers when for download", twoNodesConfig:
     let headers = newHttpHeaders({
       "Content-Disposition": "attachment; filename=\"example.txt\"",
       "Content-Type": "text/plain"
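The two "Invalid parameters" checks above pin down the request-validation rule for nodes and tolerance: tolerance must be positive (covered by a separate test), must not exceed nodes, and the number of data slots nodes - tolerance (the erasure-coding K) must both exceed 1 and be at least tolerance. A hypothetical validator restating that rule (names are illustrative, not the node's actual API):

# Hypothetical restatement of `1 < (nodes - tolerance) ≥ tolerance`.
proc validEcParams(nodes, tolerance: uint): bool =
  if tolerance == 0 or tolerance > nodes:
    return false
  let ecK = nodes - tolerance      # data shards
  ecK > 1 and ecK >= tolerance     # parity (tolerance) can't outnumber data

assert validEcParams(nodes = 3, tolerance = 1)      # K = 2: accepted
assert not validEcParams(nodes = 3, tolerance = 2)  # K = 1: rejected
assert not validEcParams(nodes = 2, tolerance = 3)  # underflow guarded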
--- a/tests/integration/testsales.nim
+++ b/tests/integration/testsales.nim
@@ -3,6 +3,9 @@ import pkg/codex/contracts
 import ./twonodes
 import ../codex/examples
 import ../contracts/time
+import ./codexconfig
+import ./codexclient
+import ./nodeconfigs

 proc findItem[T](items: seq[T], item: T): ?!T =
   for tmp in items:
@@ -11,54 +14,65 @@ proc findItem[T](items: seq[T], item: T): ?!T =

   return failure("Not found")

-twonodessuite "Sales", debug1 = false, debug2 = false:
+multinodesuite "Sales":
+  let salesConfig = NodeConfigs(
+    clients: CodexConfigs.init(nodes=1).some,
+    providers: CodexConfigs.init(nodes=1).some,
+  )
+
+  var host: CodexClient
+  var client: CodexClient

-  test "node handles new storage availability":
-    let availability1 = client1.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
-    let availability2 = client1.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get
+  setup:
+    host = providers()[0].client
+    client = clients()[0].client
+
+  test "node handles new storage availability", salesConfig:
+    let availability1 = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
+    let availability2 = host.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get
     check availability1 != availability2

-  test "node lists storage that is for sale":
-    let availability = client1.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
-    check availability in client1.getAvailabilities().get
+  test "node lists storage that is for sale", salesConfig:
+    let availability = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
+    check availability in host.getAvailabilities().get

-  test "updating non-existing availability":
-    let nonExistingResponse = client1.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
+  test "updating non-existing availability", salesConfig:
+    let nonExistingResponse = host.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
     check nonExistingResponse.status == "404 Not Found"

-  test "updating availability":
-    let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
+  test "updating availability", salesConfig:
+    let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get

-    client1.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
+    host.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)

-    let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+    let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check updatedAvailability.duration == 100
     check updatedAvailability.minPrice == 200
     check updatedAvailability.maxCollateral == 200
     check updatedAvailability.totalSize == 140000
     check updatedAvailability.freeSize == 140000

-  test "updating availability - freeSize is not allowed to be changed":
-    let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
-    let freeSizeResponse = client1.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some)
+  test "updating availability - freeSize is not allowed to be changed", salesConfig:
+    let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    let freeSizeResponse = host.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some)
     check freeSizeResponse.status == "400 Bad Request"
     check "not allowed" in freeSizeResponse.body

-  test "updating availability - updating totalSize":
-    let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
-    client1.patchAvailability(availability.id, totalSize=100000.u256.some)
-    let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+  test "updating availability - updating totalSize", salesConfig:
+    let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    host.patchAvailability(availability.id, totalSize=100000.u256.some)
+    let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check updatedAvailability.totalSize == 100000
     check updatedAvailability.freeSize == 100000

-  test "updating availability - updating totalSize does not allow bellow utilized":
+  test "updating availability - updating totalSize does not allow bellow utilized", salesConfig:
     let originalSize = 0xFFFFFF.u256
     let data = await RandomChunker.example(blocks=8)
-    let availability = client1.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    let availability = host.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get

     # Lets create storage request that will utilize some of the availability's space
-    let cid = client2.upload(data).get
-    let id = client2.requestStorage(
+    let cid = client.upload(data).get
+    let id = client.requestStorage(
       cid,
       duration=20*60.u256,
       reward=400.u256,
@@ -68,16 +82,16 @@ twonodessuite "Sales", debug1 = false, debug2 = false:
       nodes = 3,
       tolerance = 1).get

-    check eventually(client2.purchaseStateIs(id, "started"), timeout=10*60*1000)
-    let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+    check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000)
+    let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check updatedAvailability.totalSize != updatedAvailability.freeSize

     let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize
-    let totalSizeResponse = client1.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some)
+    let totalSizeResponse = host.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some)
     check totalSizeResponse.status == "400 Bad Request"
     check "totalSize must be larger then current totalSize" in totalSizeResponse.body

-    client1.patchAvailability(availability.id, totalSize=(originalSize + 20000).some)
-    let newUpdatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+    host.patchAvailability(availability.id, totalSize=(originalSize + 20000).some)
+    let newUpdatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check newUpdatedAvailability.totalSize == originalSize + 20000
     check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000
--- a/tests/integration/testupdownload.nim
+++ b/tests/integration/testupdownload.nim
@@ -1,11 +1,11 @@
 import pkg/codex/rest/json
 import ./twonodes
 import ../codex/examples
 import json
+from pkg/libp2p import Cid, `$`

-twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
-
-  test "node allows local file downloads":
+twonodessuite "Uploads and downloads":
+  test "node allows local file downloads", twoNodesConfig:
     let content1 = "some file contents"
     let content2 = "some other contents"

@@ -19,7 +19,7 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
     content1 == resp1
     content2 == resp2

-  test "node allows remote file downloads":
+  test "node allows remote file downloads", twoNodesConfig:
     let content1 = "some file contents"
     let content2 = "some other contents"

@@ -33,7 +33,7 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
     content1 == resp1
     content2 == resp2

-  test "node fails retrieving non-existing local file":
+  test "node fails retrieving non-existing local file", twoNodesConfig:
     let content1 = "some file contents"
     let cid1 = client1.upload(content1).get # upload to first node
     let resp2 = client2.download(cid1, local = true) # try retrieving from second node
@@ -64,14 +64,14 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
     check manifest.hasKey("protected") == true
     check manifest["protected"].getBool() == false

-  test "node allows downloading only manifest":
+  test "node allows downloading only manifest", twoNodesConfig:
     let content1 = "some file contents"
     let cid1 = client1.upload(content1).get

     let resp2 = client1.downloadManifestOnly(cid1)
     checkRestContent(cid1, resp2)

-  test "node allows downloading content without stream":
+  test "node allows downloading content without stream", twoNodesConfig:
     let content1 = "some file contents"
     let cid1 = client1.upload(content1).get

@@ -80,3 +80,15 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
     let resp2 = client2.download(cid1, local = true).get
     check:
       content1 == resp2
+
+  test "reliable transfer test", twoNodesConfig:
+    proc transferTest(a: CodexClient, b: CodexClient) {.async.} =
+      let data = await RandomChunker.example(blocks=8)
+      let cid = a.upload(data).get
+      let response = b.download(cid).get
+      check:
+        response == data
+
+    for run in 0..10:
+      await transferTest(client1, client2)
+      await transferTest(client2, client1)
--- /dev/null
+++ b/tests/integration/testvalidator.nim
@@ -0,0 +1,220 @@
+from std/times import inMilliseconds, initDuration, inSeconds, fromUnix
+import std/sugar
+import pkg/codex/logutils
+import pkg/questionable/results
+import pkg/ethers/provider
+import ../contracts/time
+import ../contracts/deployment
+import ../codex/helpers
+import ../examples
+import ./marketplacesuite
+import ./nodeconfigs
+
+export logutils
+
+logScope:
+  topics = "integration test validation"
+
+template eventuallyS(expression: untyped, timeout=10, step = 5,
+    cancelExpression: untyped = false): bool =
+  bind Moment, now, seconds
+
+  proc eventuallyS: Future[bool] {.async.} =
+    let endTime = Moment.now() + timeout.seconds
+    var secondsElapsed = 0
+    while not expression:
+      if endTime < Moment.now():
+        return false
+      if cancelExpression:
+        return false
+      await sleepAsync(step.seconds)
+    return true
+
+  await eventuallyS()
+
+marketplacesuite "Validation":
+  let nodes = 3
+  let tolerance = 1
+  let proofProbability = 1
+
+  proc waitForRequestToFail(
+      marketplace: Marketplace,
+      requestId: RequestId,
+      timeout=10,
+      step = 5,
+  ): Future[bool] {.async.} =
+    let endTime = Moment.now() + timeout.seconds
+
+    var requestState = await marketplace.requestState(requestId)
+    while requestState != RequestState.Failed:
+      if endTime < Moment.now():
+        return false
+      if requestState != RequestState.Started:
+        return false
+      await sleepAsync(step.seconds)
+      requestState = await marketplace.requestState(requestId)
+    return true
+
+  test "validator marks proofs as missing when using validation groups", NodeConfigs(
+    # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
+    hardhat:
+      HardhatConfig.none,
+
+    clients:
+      CodexConfigs.init(nodes=1)
+        # .debug() # uncomment to enable console log output
+        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        .withLogTopics("purchases", "onchain")
+        .some,
+
+    providers:
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        # .withLogTopics("sales", "onchain")
+        .some,
+
+    validators:
+      CodexConfigs.init(nodes=2)
+        .withValidationGroups(groups = 2)
+        .withValidationGroupIndex(idx = 0, groupIndex = 0)
+        .withValidationGroupIndex(idx = 1, groupIndex = 1)
+        # .debug() # uncomment to enable console log output
+        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        .withLogTopics("validator") # each topic as a separate string argument
+        .some
+  ):
+    let client0 = clients()[0].client
+    let expiry = 5.periods
+    let duration = expiry + 10.periods
+
+    # let mine a block to sync the blocktime with the current clock
+    discard await ethProvider.send("evm_mine")
+
+    var currentTime = await ethProvider.currentTime()
+    let requestEndTime = currentTime.truncate(uint64) + duration
+
+    let data = await RandomChunker.example(blocks=8)
+
+    # TODO: better value for data.len below. This TODO is also present in
+    # testproofs.nim - we may want to address it or remove the comment.
+    createAvailabilities(data.len * 2, duration)
+
+    let cid = client0.upload(data).get
+    let purchaseId = await client0.requestStorage(
+      cid,
+      expiry=expiry,
+      duration=duration,
+      nodes=nodes,
+      tolerance=tolerance,
+      proofProbability=proofProbability
+    )
+    let requestId = client0.requestId(purchaseId).get
+
+    debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId
+
+    if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"),
+        timeout = (expiry + 60).int, step = 5):
+      debug "validation suite: timed out waiting for the purchase to start"
+      fail()
+      return
+
+    discard await ethProvider.send("evm_mine")
+    currentTime = await ethProvider.currentTime()
+    let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int
+
+    debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds
+
+    check await marketplace.waitForRequestToFail(
+      requestId,
+      timeout = secondsTillRequestEnd + 60,
+      step = 5
+    )
+
+  test "validator uses historical state to mark missing proofs", NodeConfigs(
+    # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
+    hardhat:
+      HardhatConfig.none,
+
+    clients:
+      CodexConfigs.init(nodes=1)
+        # .debug() # uncomment to enable console log output
+        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        .withLogTopics("purchases", "onchain")
+        .some,
+
+    providers:
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        # .withLogTopics("sales", "onchain")
+        .some
+  ):
+    let client0 = clients()[0].client
+    let expiry = 5.periods
+    let duration = expiry + 10.periods
+
+    # let mine a block to sync the blocktime with the current clock
+    discard await ethProvider.send("evm_mine")
+
+    var currentTime = await ethProvider.currentTime()
+    let requestEndTime = currentTime.truncate(uint64) + duration
+
+    let data = await RandomChunker.example(blocks=8)
+
+    # TODO: better value for data.len below. This TODO is also present in
+    # testproofs.nim - we may want to address it or remove the comment.
+    createAvailabilities(data.len * 2, duration)
+
+    let cid = client0.upload(data).get
+    let purchaseId = await client0.requestStorage(
+      cid,
+      expiry=expiry,
+      duration=duration,
+      nodes=nodes,
+      tolerance=tolerance,
+      proofProbability=proofProbability
+    )
+    let requestId = client0.requestId(purchaseId).get
+
+    debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId
+
+    if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"),
+        timeout = (expiry + 60).int, step = 5):
+      debug "validation suite: timed out waiting for the purchase to start"
+      fail()
+      return
+
+    # extra block just to make sure we have one that separates us
+    # from the block containing the last (past) SlotFilled event
+    discard await ethProvider.send("evm_mine")
+
+    var validators = CodexConfigs.init(nodes=2)
+      .withValidationGroups(groups = 2)
+      .withValidationGroupIndex(idx = 0, groupIndex = 0)
+      .withValidationGroupIndex(idx = 1, groupIndex = 1)
+      # .debug() # uncomment to enable console log output
+      .withLogFile() # uncomment to output log file to: # tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+      .withLogTopics("validator") # each topic as a separate string argument
+
+    failAndTeardownOnError "failed to start validator nodes":
+      for config in validators.configs.mitems:
+        let node = await startValidatorNode(config)
+        running.add RunningNode(
+          role: Role.Validator,
+          node: node
+        )
+
+    discard await ethProvider.send("evm_mine")
+    currentTime = await ethProvider.currentTime()
+    let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int
+
+    debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds
+
+    check await marketplace.waitForRequestToFail(
+      requestId,
+      timeout = secondsTillRequestEnd + 60,
+      step = 5
+    )
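`eventuallyS` and `waitForRequestToFail` in the new file above are two instances of the same deadline-plus-polling loop: re-evaluate a condition every `step` seconds until it holds, the deadline passes, or a cancel condition fires. A stripped-down standalone version of the pattern, assuming only chronos (names are illustrative):

import pkg/chronos

# Re-check `condition` every `step` seconds until it holds or `timeout`
# seconds have elapsed; returns true iff the condition was met in time.
proc pollUntil(condition: proc (): bool,
               timeout = 10, step = 5): Future[bool] {.async.} =
  let endTime = Moment.now() + timeout.seconds
  while not condition():
    if endTime < Moment.now():
      return false
    await sleepAsync(step.seconds)
  return true

when isMainModule:
  var ticks = 0
  proc threeTicks(): bool =
    inc ticks
    ticks >= 3

  echo waitFor pollUntil(threeTicks, timeout = 30, step = 1)  # true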
--- a/tests/integration/twonodes.nim
+++ b/tests/integration/twonodes.nim
@@ -1,94 +1,34 @@
-import std/os
-import std/macros
-import std/httpclient
-import ../ethertest
+import pkg/questionable
+import ./multinodes
+import ./codexconfig
+import ./codexprocess
 import ./codexclient
-import ./nodes
+import ./nodeconfigs

-export ethertest
 export codexclient
-export nodes
+export multinodes

-template twonodessuite*(name: string, debug1, debug2: bool | string, body) =
-  twonodessuite(name, $debug1, $debug2, body)
+template twonodessuite*(name: string, body: untyped) =
+  multinodesuite name:
+    let twoNodesConfig {.inject, used.} = NodeConfigs(clients: CodexConfigs.init(nodes=2).some)

-template twonodessuite*(name: string, debug1, debug2: string, body) =
-  ethersuite name:
-
-    var node1 {.inject, used.}: NodeProcess
-    var node2 {.inject, used.}: NodeProcess
+    var node1 {.inject, used.}: CodexProcess
+    var node2 {.inject, used.}: CodexProcess
     var client1 {.inject, used.}: CodexClient
     var client2 {.inject, used.}: CodexClient
     var account1 {.inject, used.}: Address
     var account2 {.inject, used.}: Address

-    let dataDir1 = getTempDir() / "Codex1"
-    let dataDir2 = getTempDir() / "Codex2"
-
     setup:
-      client1 = CodexClient.new("http://localhost:8080/api/codex/v1")
-      client2 = CodexClient.new("http://localhost:8081/api/codex/v1")
       account1 = accounts[0]
       account2 = accounts[1]

-      var node1Args = @[
-        "--api-port=8080",
-        "--data-dir=" & dataDir1,
-        "--nat=127.0.0.1",
-        "--disc-ip=127.0.0.1",
-        "--disc-port=8090",
-        "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-        "persistence",
-        "prover",
-        "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
-        "--circom-wasm=tests/circuits/fixtures/proof_main.wasm",
-        "--circom-zkey=tests/circuits/fixtures/proof_main.zkey",
-        "--eth-provider=http://127.0.0.1:8545",
-        "--eth-account=" & $account1
-      ]
+      node1 = clients()[0]
+      node2 = clients()[1]

-      if debug1 != "true" and debug1 != "false":
-        node1Args.add("--log-level=" & debug1)
-
-      node1 = startNode(node1Args, debug = debug1)
-      node1.waitUntilStarted()
-
-      let bootstrap = (!client1.info()["spr"]).getStr()
-
-      var node2Args = @[
-        "--api-port=8081",
-        "--data-dir=" & dataDir2,
-        "--nat=127.0.0.1",
-        "--disc-ip=127.0.0.1",
-        "--disc-port=8091",
-        "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-        "--bootstrap-node=" & bootstrap,
-        "persistence",
-        "prover",
-        "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
-        "--circom-wasm=tests/circuits/fixtures/proof_main.wasm",
-        "--circom-zkey=tests/circuits/fixtures/proof_main.zkey",
-        "--eth-provider=http://127.0.0.1:8545",
-        "--eth-account=" & $account2
-      ]
-
-      if debug2 != "true" and debug2 != "false":
-        node2Args.add("--log-level=" & debug2)
-
-      node2 = startNode(node2Args, debug = debug2)
-      node2.waitUntilStarted()
-
-      # ensure that we have a recent block with a fresh timestamp
-      discard await send(ethProvider, "evm_mine")
-
-    teardown:
-      client1.close()
-      client2.close()
-
-      node1.stop()
-      node2.stop()
-
-      removeDir(dataDir1)
-      removeDir(dataDir2)
+      client1 = node1.client
+      client2 = node2.client

     body
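The rewritten `twonodessuite` depends on the `{.inject.}` pragma: symbols declared inside the template (`twoNodesConfig`, `node1`, `client1`, `account1`, ...) stay visible to the test `body` supplied by the caller. A minimal sketch of the mechanism, independent of the test harness:

# Templates are hygienic by default: symbols they declare are renamed away
# from the caller. `{.inject.}` opts a symbol out of hygiene so the
# instantiation site can refer to it by name.
template withGreeting(body: untyped) =
  let greeting {.inject.} = "hello from the template"
  body

withGreeting:
  echo greeting   # resolves to the template's injected `greeting`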
--- a/tests/testContracts.nim
+++ b/tests/testContracts.nim
@@ -2,5 +2,6 @@ import ./contracts/testContracts
 import ./contracts/testMarket
 import ./contracts/testDeployment
 import ./contracts/testClock
+import ./contracts/testProvider

 {.warning[UnusedImport]:off.}

--- a/tests/testIntegration.nim
+++ b/tests/testIntegration.nim
@@ -6,6 +6,7 @@ import ./integration/testpurchasing
 import ./integration/testblockexpiration
 import ./integration/testmarketplace
 import ./integration/testproofs
+import ./integration/testvalidator
 import ./integration/testecbug

 {.warning[UnusedImport]:off.}
@@ -1 +1 @@
-Subproject commit 06f9f56cd27f3eba5fee88a4cdf20668f3d9ce43
+Subproject commit dfab6102e71d2acaff86af45b87be2536530c624