diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..6427da1d --- /dev/null +++ b/.dockerignore @@ -0,0 +1,6 @@ +.github +build +docs +metrics +nimcache +tests diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..bbcbbe7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. 
diff --git a/.github/actions/nimbus-build-system/action.yml b/.github/actions/nimbus-build-system/action.yml index b78c5596..e4f58209 100644 --- a/.github/actions/nimbus-build-system/action.yml +++ b/.github/actions/nimbus-build-system/action.yml @@ -9,17 +9,26 @@ inputs: cpu: description: "CPU to build for" default: "amd64" - nim_branch: + nim_version: description: "Nim version" default: "version-1-6" + rust_version: + description: "Rust version" + default: "1.78.0" shell: description: "Shell to run commands in" default: "bash --noprofile --norc -e -o pipefail" runs: using: "composite" steps: - - name: APT (Linux amd64) - if: inputs.os == 'linux' && inputs.cpu == 'amd64' + - name: Rust (Linux) + if: inputs.os == 'linux' + shell: ${{ inputs.shell }} {0} + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${{ inputs.rust_version }} -y + + - name: APT (Linux amd64/arm64) + if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64') shell: ${{ inputs.shell }} {0} run: | sudo apt-fast update -qq @@ -45,6 +54,7 @@ runs: if: inputs.os == 'windows' && inputs.cpu == 'amd64' uses: msys2/setup-msys2@v2 with: + path-type: inherit msystem: UCRT64 install: > base-devel @@ -52,11 +62,13 @@ runs: mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-ntldd-git + mingw-w64-ucrt-x86_64-rust - name: MSYS2 (Windows i386) if: inputs.os == 'windows' && inputs.cpu == 'i386' uses: msys2/setup-msys2@v2 with: + path-type: inherit msystem: MINGW32 install: > base-devel @@ -64,6 +76,13 @@ runs: mingw-w64-i686-toolchain mingw-w64-i686-cmake mingw-w64-i686-ntldd-git + mingw-w64-i686-rust + + - name: MSYS2 (Windows All) - Downgrade to gcc 13 + if: inputs.os == 'windows' + shell: ${{ inputs.shell }} {0} + run: | + pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-13.2.0-6-any.pkg.tar.zst 
https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst - name: Derive environment variables shell: ${{ inputs.shell }} {0} @@ -73,15 +92,10 @@ runs: printf "'%s'" "$quoted" } - if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then - PLATFORM=x64 - else - PLATFORM=x86 - fi - echo "PLATFORM=${PLATFORM}" >> ${GITHUB_ENV} + [[ '${{ inputs.cpu }}' == 'i386' ]] && echo "ARCH_OVERRIDE=ARCH_OVERRIDE=x86" >> ${GITHUB_ENV} - # Stack usage on Linux amd64 - if [[ '${{ inputs.os }}' == 'linux' && '${{ inputs.cpu }}' == 'amd64' ]]; then + # Stack usage on Linux amd64/arm64 + if [[ '${{ inputs.os }}' == 'linux' && ('${{ inputs.cpu }}' == 'amd64' || '${{ inputs.cpu }}' == 'arm64')]]; then NIMFLAGS="${NIMFLAGS} -d:limitStackUsage" echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV fi @@ -135,35 +149,28 @@ runs: # Use all available CPUs for build process ncpu="" case '${{ inputs.os }}' in - 'linux') - ncpu=$(nproc) - ;; - 'macos') - ncpu=$(sysctl -n hw.ncpu) - ;; - 'windows') - ncpu=${NUMBER_OF_PROCESSORS} - ;; + 'linux') ncpu=$(nproc) ;; + 'macos') ncpu=$(sysctl -n hw.ncpu) ;; + 'windows') ncpu=${NUMBER_OF_PROCESSORS} ;; esac [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1 echo "ncpu=${ncpu}" >> ${GITHUB_ENV} - name: Restore Nim toolchain binaries from cache id: nim-cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: NimBinaries - key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}-${{ github.run_id }} - restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }} + key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }} + restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }} - name: Set NIM_COMMIT shell: ${{ inputs.shell }} {0} - run: echo "NIM_COMMIT=${{ inputs.nim_branch }}" >> ${GITHUB_ENV} + run: echo "NIM_COMMIT=${{ inputs.nim_version }}" 
>> ${GITHUB_ENV} - name: Build Nim and Codex dependencies shell: ${{ inputs.shell }} {0} run: | - make -j${ncpu} CI_CACHE=NimBinaries ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1 update + make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update echo ./env.sh nim --version - diff --git a/.github/workflows/Readme.md b/.github/workflows/Readme.md new file mode 100644 index 00000000..c644ee9d --- /dev/null +++ b/.github/workflows/Readme.md @@ -0,0 +1,77 @@ +Tips for shorter build times +---------------------------- + +### Runner availability ### + +Currently, the biggest bottleneck when optimizing workflows is the availability +of Windows and macOS runners. Therefore, anything that reduces the time spent in +Windows or macOS jobs will have a positive impact on the time waiting for +runners to become available. The usage limits for Github Actions are [described +here][limits]. You can see a breakdown of runner usage for your jobs in the +Github Actions tab ([example][usage]). + +### Windows is slow ### + +Performing git operations and compilation are both slow on Windows. This can +easily mean that a Windows job takes twice as long as a Linux job. Therefore it +makes sense to use a Windows runner only for testing Windows compatibility, and +nothing else. Testing compatibility with other versions of Nim, code coverage +analysis, etc. are therefore better performed on a Linux runner. + +### Parallelization ### + +Breaking up a long build job into several jobs that you run in parallel can have +a positive impact on the wall clock time that a workflow runs. For instance, you +might consider running unit tests and integration tests in parallel. Keep in +mind however that availability of macOS and Windows runners is the biggest +bottleneck. If you split a Windows job into two jobs, you now need to wait for +two Windows runners to become available! Therefore parallelization often only +makes sense for Linux jobs. 
+ +### Refactoring ### + +As with any code, complex workflows are hard to read and change. You can use +[composite actions][composite] and [reusable workflows][reusable] to refactor +complex workflows. + +### Steps for measuring time ### + +Breaking up steps allows you to see the time spent in each part. For instance, +instead of having one step where all tests are performed, you might consider +having separate steps for e.g. unit tests and integration tests, so that you can +see how much time is spent in each. + +### Fix slow tests ### + +Try to avoid slow unit tests. They not only slow down continuous integration, +but also local development. If you encounter slow tests you can consider +reworking them to stub out the slow parts that are not under test, or use +smaller data structures for the test. + +You can use [unittest2][unittest2] together with the environment variable +`NIMTEST_TIMING=true` to show how much time is spent in every test +([reference][testtime]). + +### Caching ### + +Ensure that caches are updated over time. For instance if you cache the latest +version of the Nim compiler, then you want to update the cache when a new +version of the compiler is released. See also the documentation +for the [cache action][cache]. + +### Fail fast ### + +By default a workflow fails fast: if one job fails, the rest are cancelled. This +might seem inconvenient, because when you're debugging an issue you often want +to know whether you introduced a failure on all platforms, or only on a single +one. You might be tempted to disable fail-fast, but keep in mind that this keeps +runners busy for longer on a workflow that you know is going to fail anyway. +Subsequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed. 
+ +[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage +[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action +[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows +[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache +[unittest2]: https://github.com/status-im/nim-unittest2 +[testtime]: https://github.com/status-im/nim-unittest2/pull/12 +[limits]: https://docs.github.com/en/actions/learn-github-actions/usage-limits-billing-and-administration#usage-limits diff --git a/.github/workflows/ci-reusable.yml b/.github/workflows/ci-reusable.yml new file mode 100644 index 00000000..08a182f5 --- /dev/null +++ b/.github/workflows/ci-reusable.yml @@ -0,0 +1,88 @@ +name: Reusable - CI + +on: + workflow_call: + inputs: + matrix: + type: string + cache_nonce: + default: '0' + description: Allows for easily busting actions/cache caches + required: false + type: string + +env: + cache_nonce: ${{ inputs.cache_nonce }} + +jobs: + build: + strategy: + matrix: + include: ${{ fromJson(inputs.matrix) }} + + defaults: + run: + shell: ${{ matrix.shell }} {0} + + name: '${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.tests }}' + runs-on: ${{ matrix.builder }} + timeout-minutes: 90 + steps: + - name: Checkout sources + uses: actions/checkout@v4 + with: + submodules: recursive + ref: ${{ github.event.pull_request.head.sha }} + + - name: Setup Nimbus Build System + uses: ./.github/actions/nimbus-build-system + with: + os: ${{ matrix.os }} + shell: ${{ matrix.shell }} + nim_version: ${{ matrix.nim_version }} + + ## Part 1 Tests ## + - name: Unit tests + if: matrix.tests == 'unittest' || matrix.tests == 'all' + run: make -j${ncpu} test + + # workaround for https://github.com/NomicFoundation/hardhat/issues/3877 + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 18.15 + + - name: Start Ethereum node with Codex contracts + 
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'all' + working-directory: vendor/codex-contracts-eth + env: + MSYS2_PATH_TYPE: inherit + run: | + npm install + npm start & + + ## Part 2 Tests ## + - name: Contract tests + if: matrix.tests == 'contract' || matrix.tests == 'all' + run: make -j${ncpu} testContracts + + ## Part 3 Tests ## + - name: Integration tests + if: matrix.tests == 'integration' || matrix.tests == 'all' + run: make -j${ncpu} testIntegration + + - name: Upload integration tests log files + uses: actions/upload-artifact@v4 + if: (matrix.tests == 'integration' || matrix.tests == 'all') && always() + with: + name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs + path: tests/integration/logs/ + retention-days: 1 + + status: + if: always() + needs: [build] + runs-on: ubuntu-latest + steps: + - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} + run: exit 1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4fddce10..4754e3ae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,106 +1,74 @@ name: CI + on: push: branches: - - main + - master pull_request: workflow_dispatch: + env: cache_nonce: 0 # Allows for easily busting actions/cache caches + nim_version: pinned + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: - build: - strategy: - matrix: - os: [linux, macos, windows] - include: - - os: linux - builder: ubuntu-latest - shell: bash --noprofile --norc -e -o pipefail - - os: macos - builder: macos-latest - shell: bash --noprofile --norc -e -o pipefail - - os: windows - builder: windows-latest - shell: msys2 - - defaults: - run: - shell: ${{ matrix.shell }} {0} - - name: '${{ matrix.os }}' - runs-on: ${{ matrix.builder }} - timeout-minutes: 80 + matrix: + runs-on: ubuntu-latest + outputs: + 
matrix: ${{ steps.matrix.outputs.matrix }} + cache_nonce: ${{ env.cache_nonce }} steps: - - name: Checkout sources - uses: actions/checkout@v3 - with: - submodules: recursive + - name: Compute matrix + id: matrix + uses: fabiocaccamo/create-matrix-action@v4 + with: + matrix: | + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {macos}, cpu {amd64}, builder {macos-13}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2} + os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2} + os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2} - - name: Setup Nimbus Build System - uses: ./.github/actions/nimbus-build-system - with: - os: ${{ matrix.os }} - shell: ${{ matrix.shell }} - - - name: Unit tests - run: make -j${ncpu} test - - - name: Start Ethereum node with Codex contracts - working-directory: vendor/dagger-contracts - run: | - if [[ '${{ matrix.os }}' == 'windows' ]]; then - export PATH="${PATH}:/c/program files/nodejs" - fi - npm install - npm start & - - - name: Contract tests - run: make -j${ncpu} testContracts - - - name: Integration tests - run: make -j${ncpu} testIntegration + build: + needs: matrix + uses: ./.github/workflows/ci-reusable.yml + with: + matrix: ${{ needs.matrix.outputs.matrix }} + cache_nonce: ${{ needs.matrix.outputs.cache_nonce }} coverage: - continue-on-error: true runs-on: ubuntu-latest steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive + ref: ${{ github.event.pull_request.head.sha }} - name: Setup Nimbus Build System uses: ./.github/actions/nimbus-build-system with: os: 
linux + nim_version: ${{ env.nim_version }} - name: Generate coverage data - run: make -j${ncpu} coverage + run: | + # make -j${ncpu} coverage + make -j${ncpu} coverage-script shell: bash - name: Upload coverage data to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: directory: ./coverage/ fail_ci_if_error: true files: ./coverage/coverage.f.info flags: unittests name: codecov-umbrella + token: ${{ secrets.CODECOV_TOKEN }} verbose: true - - nim_1_2: - runs-on: ubuntu-latest - steps: - - name: Checkout sources - uses: actions/checkout@v3 - with: - submodules: recursive - - - name: Setup Nimbus Build System - uses: ./.github/actions/nimbus-build-system - with: - os: linux - nim_branch: version-1-2 - - - name: Unit tests - run: make -j${ncpu} test diff --git a/.github/workflows/docker-dist-tests.yml b/.github/workflows/docker-dist-tests.yml new file mode 100644 index 00000000..ae1e5e9e --- /dev/null +++ b/.github/workflows/docker-dist-tests.yml @@ -0,0 +1,33 @@ +name: Docker - Dist-Tests + + +on: + push: + branches: + - master + tags: + - 'v*.*.*' + paths-ignore: + - '**/*.md' + - '.gitignore' + - '.github/**' + - '!.github/workflows/docker-dist-tests.yml' + - '!.github/workflows/docker-reusable.yml' + - 'docker/**' + - '!docker/codex.Dockerfile' + - '!docker/docker-entrypoint.sh' + workflow_dispatch: + + +jobs: + build-and-push: + name: Build and Push + uses: ./.github/workflows/docker-reusable.yml + with: + nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true' + nat_ip_auto: true + tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }} + tag_suffix: dist-tests + continuous_tests_list: PeersTest HoldMyBeerTest + continuous_tests_duration: 12h + secrets: inherit diff --git a/.github/workflows/docker-reusable.yml b/.github/workflows/docker-reusable.yml new file mode 100644 index 
00000000..ebd37ae8 --- /dev/null +++ b/.github/workflows/docker-reusable.yml @@ -0,0 +1,267 @@ +name: Reusable - Docker + + +on: + workflow_call: + inputs: + docker_file: + default: docker/codex.Dockerfile + description: Dockerfile + required: false + type: string + docker_repo: + default: codexstorage/nim-codex + description: DockerHub repository + required: false + type: string + make_parallel: + default: 4 + description: Make parallel + required: false + type: number + nimflags: + default: '-d:disableMarchNative' + description: Nim flags for builds + required: false + type: string + nat_ip_auto: + default: false + description: Enable NAT IP auto + required: false + type: boolean + tag_latest: + default: true + description: Set latest tag for Docker images + required: false + type: boolean + tag_sha: + default: true + description: Set Git short commit as Docker tag + required: false + type: boolean + tag_suffix: + default: '' + description: Suffix for Docker images tag + required: false + type: string + continuous_tests_list: + default: '' + description: Continuous Tests list + required: false + type: string + continuous_tests_duration: + default: 48h + description: Continuous Tests duration + required: false + type: string + + +env: + # Build + DOCKER_FILE: ${{ inputs.docker_file }} + DOCKER_REPO: ${{ inputs.docker_repo }} + MAKE_PARALLEL: ${{ inputs.make_parallel }} + NIMFLAGS: ${{ inputs.nimflags }} + NAT_IP_AUTO: ${{ inputs.nat_ip_auto }} + TAG_LATEST: ${{ inputs.tag_latest }} + TAG_SHA: ${{ inputs.tag_sha }} + TAG_SUFFIX: ${{ inputs.tag_suffix }} + # Tests + CONTINUOUS_TESTS_SOURCE: codex-storage/cs-codex-dist-tests + CONTINUOUS_TESTS_BRANCH: master + CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }} + CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }} + CONTINUOUS_TESTS_NAMEPREFIX: c-tests-ci + + +jobs: + # Build platform specific image + build: + strategy: + fail-fast: true + matrix: + target: + - os: linux + arch: amd64 + - os: 
linux + arch: arm64 + include: + - target: + os: linux + arch: amd64 + builder: ubuntu-22.04 + - target: + os: linux + arch: arm64 + builder: buildjet-4vcpu-ubuntu-2204-arm + + name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }} + runs-on: ${{ matrix.builder }} + env: + PLATFORM: ${{ format('{0}/{1}', 'linux', matrix.target.arch) }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Docker - Meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.DOCKER_REPO }} + + - name: Docker - Set up Buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker - Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Docker - Build and Push by digest + id: build + uses: docker/build-push-action@v5 + with: + context: . + file: ${{ env.DOCKER_FILE }} + platforms: ${{ env.PLATFORM }} + push: true + build-args: | + MAKE_PARALLEL=${{ env.MAKE_PARALLEL }} + NIMFLAGS=${{ env.NIMFLAGS }} + NAT_IP_AUTO=${{ env.NAT_IP_AUTO }} + labels: ${{ steps.meta.outputs.labels }} + outputs: type=image,name=${{ env.DOCKER_REPO }},push-by-digest=true,name-canonical=true,push=true + + - name: Docker - Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Docker - Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ matrix.target.arch }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + + # Publish multi-platform image + publish: + name: Publish multi-platform image + runs-on: ubuntu-latest + outputs: + version: ${{ steps.meta.outputs.version }} + needs: build + steps: + - name: Docker - Variables + run: | + # Adjust custom suffix when set and + if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then + echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV + fi + # Disable SHA tags on tagged release + if [[ ${{ 
startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then + echo "TAG_SHA=false" >>$GITHUB_ENV + fi + # Handle latest and latest-custom using raw + if [[ ${{ env.TAG_SHA }} == "false" ]]; then + echo "TAG_LATEST=false" >>$GITHUB_ENV + echo "TAG_RAW=true" >>$GITHUB_ENV + if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then + echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV + else + echo "TAG_RAW_VALUE=latest-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV + fi + else + echo "TAG_RAW=false" >>$GITHUB_ENV + fi + + - name: Docker - Download digests + uses: actions/download-artifact@v4 + with: + pattern: digests-* + merge-multiple: true + path: /tmp/digests + + - name: Docker - Set up Buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker - Meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.DOCKER_REPO }} + flavor: | + latest=${{ env.TAG_LATEST }} + suffix=${{ env.TAG_SUFFIX }},onlatest=true + tags: | + type=semver,pattern={{version}} + type=raw,enable=${{ env.TAG_RAW }},value=latest + type=sha,enable=${{ env.TAG_SHA }} + + - name: Docker - Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Docker - Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *) + + - name: Docker - Inspect image + run: | + docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }} + + + # Compute Continuous Tests inputs + compute-tests-inputs: + name: Compute Continuous Tests list + if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }} + runs-on: ubuntu-latest + needs: publish + outputs: + source: ${{ steps.compute.outputs.source }} + branch: ${{ steps.compute.outputs.branch }} + codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }} + nameprefix: ${{ steps.compute.outputs.nameprefix }} + continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }} + continuous_tests_duration: ${{ steps.compute.outputs.continuous_tests_duration }} + continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }} + workflow_source: ${{ steps.compute.outputs.workflow_source }} + steps: + - name: Compute Continuous Tests list + id: compute + run: | + echo "source=${{ format('{0}/{1}', github.server_url, env.CONTINUOUS_TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT" + echo "branch=${{ env.CONTINUOUS_TESTS_BRANCH }}" >> "$GITHUB_OUTPUT" + echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT" + echo "nameprefix=$(awk '{ print tolower($0) }' <<< ${{ env.CONTINUOUS_TESTS_NAMEPREFIX }})" >> "$GITHUB_OUTPUT" + echo "continuous_tests_list=$(jq -cR 'split(" ")' <<< '${{ env.CONTINUOUS_TESTS_LIST }}')" >> "$GITHUB_OUTPUT" + echo "continuous_tests_duration=${{ env.CONTINUOUS_TESTS_DURATION }}" >> "$GITHUB_OUTPUT" + echo "workflow_source=${{ env.CONTINUOUS_TESTS_SOURCE }}" >> "$GITHUB_OUTPUT" + + + # Run Continuous Tests + run-tests: + name: Run Continuous Tests + needs: [publish, compute-tests-inputs] + strategy: + max-parallel: 1 + matrix: + tests: ${{ 
fromJSON(needs.compute-tests-inputs.outputs.continuous_tests_list) }} + uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master + with: + source: ${{ needs.compute-tests-inputs.outputs.source }} + branch: ${{ needs.compute-tests-inputs.outputs.branch }} + codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }} + nameprefix: ${{ needs.compute-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }} + tests_filter: ${{ matrix.tests }} + tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }} + workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }} + secrets: inherit diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 00000000..fb97c339 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,28 @@ +name: Docker + + +on: + push: + branches: + - master + tags: + - 'v*.*.*' + paths-ignore: + - '**/*.md' + - '.gitignore' + - '.github/**' + - '!.github/workflows/docker.yml' + - '!.github/workflows/docker-reusable.yml' + - 'docker/**' + - '!docker/codex.Dockerfile' + - '!docker/docker-entrypoint.sh' + workflow_dispatch: + + +jobs: + build-and-push: + name: Build and Push + uses: ./.github/workflows/docker-reusable.yml + with: + tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }} + secrets: inherit diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..6ec42ebe --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,65 @@ +name: OpenAPI + +on: + push: + branches: + - 'master' + paths: + - 'openapi.yaml' + - '.github/workflows/docs.yml' + pull_request: + branches: + - '**' + paths: + - 'openapi.yaml' + - '.github/workflows/docs.yml' + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: 
read + pages: write + id-token: write + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: '0' + + - uses: actions/setup-node@v4 + with: + node-version: 18 + + - name: Lint OpenAPI + shell: bash + run: npx @redocly/cli lint openapi.yaml + + deploy: + name: Deploy + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/master' + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: '0' + + - uses: actions/setup-node@v4 + with: + node-version: 18 + + - name: Build OpenAPI + shell: bash + run: npx @redocly/cli build-docs openapi.yaml --output "openapi/index.html" --title "Codex API" + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: './openapi' + + - name: Deploy to GitHub Pages + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml new file mode 100644 index 00000000..579ee6a5 --- /dev/null +++ b/.github/workflows/nim-matrix.yml @@ -0,0 +1,30 @@ +name: Nim matrix + +on: + merge_group: + workflow_dispatch: + +env: + cache_nonce: 0 # Allows for easily busting actions/cache caches + nim_version: pinned + +jobs: + matrix: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.matrix.outputs.matrix }} + cache_nonce: ${{ env.cache_nonce }} + steps: + - name: Compute matrix + id: matrix + uses: fabiocaccamo/create-matrix-action@v4 + with: + matrix: | + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + + build: + needs: matrix + uses: ./.github/workflows/ci-reusable.yml + with: + matrix: ${{ needs.matrix.outputs.matrix }} + cache_nonce: ${{ needs.matrix.outputs.cache_nonce }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..a9ba1fcc --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,158 @@ +name: Release + +on: 
+ push: + tags: + - 'v*.*.*' + workflow_dispatch: + +env: + cache_nonce: 0 # Allows for easily busting actions/cache caches + nim_version: pinned + rust_version: 1.78.0 + binary_base: codex + build_dir: build + nim_flags: '-d:verify_circuit=true' + windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll' + +jobs: + # Matrix + matrix: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.matrix.outputs.matrix }} + steps: + - name: Compute matrix + id: matrix + uses: fabiocaccamo/create-matrix-action@v4 + with: + matrix: | + os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {arm64}, builder {buildjet-4vcpu-ubuntu-2204-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2} + + # Build + build: + needs: matrix + strategy: + fail-fast: false + matrix: + include: ${{fromJson(needs.matrix.outputs.matrix)}} + + defaults: + run: + shell: ${{ matrix.shell }} {0} + + name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }} + runs-on: ${{ matrix.builder }} + timeout-minutes: 80 + steps: + - name: Release - Checkout sources + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Release - Setup Nimbus Build System + uses: ./.github/actions/nimbus-build-system + with: + os: ${{ matrix.os }} + cpu: ${{ matrix.cpu }} + shell: ${{ 
matrix.shell }} + nim_version: ${{ matrix.nim_version }} + rust_version: ${{ matrix.rust_version }} + + - name: Release - Compute binary name + run: | + case ${{ matrix.os }} in + linux*) os_name="linux" ;; + macos*) os_name="darwin" ;; + windows*) os_name="windows" ;; + esac + binary="${{ env.binary_base }}-${{ github.ref_name }}-${os_name}-${{ matrix.cpu }}" + [[ ${os_name} == "windows" ]] && binary="${binary}.exe" + echo "binary=${binary}" >>$GITHUB_ENV + + - name: Release - Build + run: | + make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.binary }} ${{ env.nim_flags }}" + + - name: Release - Libraries + run: | + if [[ "${{ matrix.os }}" == "windows" ]]; then + for lib in ${{ env.windows_libs }}; do + cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}" + done + fi + + - name: Release - Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: release-${{ env.binary }} + path: ${{ env.build_dir }}/ + retention-days: 1 + + # Release + release: + runs-on: ubuntu-latest + needs: build + if: success() || failure() + steps: + - name: Release - Download binaries + uses: actions/download-artifact@v4 + with: + pattern: release* + merge-multiple: true + path: /tmp/release + + - name: Release - Compress and checksum + run: | + cd /tmp/release + checksum() { + arc="${1}" + sha256sum "${arc}" >"${arc}.sha256" + } + + # Compress and prepare + for file in *; do + # Exclude libraries + if [[ "${file}" != *".dll"* ]]; then + if [[ "${file}" == *".exe"* ]]; then + + # Windows - binary only + arc="${file%.*}.zip" + zip "${arc}" "${file}" + checksum "${arc}" + + # Windows - binary and libs + arc="${file%.*}-libs.zip" + zip "${arc}" "${file}" ${{ env.windows_libs }} + rm -f "${file}" ${{ env.windows_libs }} + checksum "${arc}" + else + + # Linux/macOS + arc="${file}.tar.gz" + chmod 755 "${file}" + tar cfz "${arc}" "${file}" + rm -f "${file}" + checksum "${arc}" + fi + fi + done + + - name: Release - Upload compressed artifacts and checksums + uses: 
actions/upload-artifact@v4 + with: + name: archives-and-checksums + path: /tmp/release/ + retention-days: 1 + + - name: Release + uses: softprops/action-gh-release@v2 + if: startsWith(github.ref, 'refs/tags/') + with: + files: | + /tmp/release/* + make_latest: true diff --git a/.gitignore b/.gitignore index 4ecde076..1b8885e0 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ !*.* *.exe +!LICENSE* !Makefile nimcache/ @@ -15,6 +16,8 @@ coverage/ # Nimble packages /vendor/.nimble +/vendor/packages/ +# /vendor/*/ # Nimble user files nimble.develop @@ -23,6 +26,9 @@ nimble.paths # vscode .vscode +# JetBrain's IDEs +.idea + # Each developer can create a personal .env file with # local settings overrides (e.g. WEB3_URL) .env @@ -30,3 +36,8 @@ nimble.paths .update.timestamp codex.nims nimbus-build-system.paths +docker/hostdatadir +docker/prometheus-data +.DS_Store +nim.cfg +tests/integration/logs diff --git a/.gitmodules b/.gitmodules index 9abd397f..06d1b823 100644 --- a/.gitmodules +++ b/.gitmodules @@ -5,7 +5,7 @@ branch = master [submodule "vendor/nim-libp2p"] path = vendor/nim-libp2p - url = https://github.com/status-im/nim-libp2p.git + url = https://github.com/vacp2p/nim-libp2p.git ignore = untracked branch = master [submodule "vendor/nimcrypto"] @@ -133,10 +133,6 @@ url = https://github.com/status-im/nim-websock.git ignore = untracked branch = master -[submodule "vendor/dagger-contracts"] - path = vendor/dagger-contracts - url = https://github.com/status-im/dagger-contracts - ignore = dirty [submodule "vendor/nim-contract-abi"] path = vendor/nim-contract-abi url = https://github.com/status-im/nim-contract-abi @@ -168,9 +164,9 @@ [submodule "vendor/nim-leopard"] path = vendor/nim-leopard url = https://github.com/status-im/nim-leopard.git -[submodule "vendor/nim-libp2p-dht"] - path = vendor/nim-libp2p-dht - url = https://github.com/status-im/nim-libp2p-dht.git +[submodule "vendor/nim-codex-dht"] + path = vendor/nim-codex-dht + url = 
https://github.com/codex-storage/nim-codex-dht.git ignore = untracked branch = master [submodule "vendor/nim-datastore"] @@ -182,3 +178,40 @@ [submodule "vendor/nim-eth"] path = vendor/nim-eth url = https://github.com/status-im/nim-eth +[submodule "vendor/codex-contracts-eth"] + path = vendor/codex-contracts-eth + url = https://github.com/status-im/codex-contracts-eth +[submodule "vendor/nim-protobuf-serialization"] + path = vendor/nim-protobuf-serialization + url = https://github.com/status-im/nim-protobuf-serialization +[submodule "vendor/nim-results"] + path = vendor/nim-results + url = https://github.com/arnetheduck/nim-results +[submodule "vendor/nim-testutils"] + path = vendor/nim-testutils + url = https://github.com/status-im/nim-testutils +[submodule "vendor/npeg"] + path = vendor/npeg + url = https://github.com/zevv/npeg +[submodule "vendor/nim-poseidon2"] + path = vendor/nim-poseidon2 + url = https://github.com/codex-storage/nim-poseidon2.git +[submodule "vendor/constantine"] + path = vendor/constantine + url = https://github.com/mratsim/constantine.git +[submodule "vendor/nim-circom-compat"] + path = vendor/nim-circom-compat + url = https://github.com/codex-storage/nim-circom-compat.git + ignore = untracked + branch = master +[submodule "vendor/codex-storage-proofs-circuits"] + path = vendor/codex-storage-proofs-circuits + url = https://github.com/codex-storage/codex-storage-proofs-circuits.git + ignore = untracked + branch = master +[submodule "vendor/nim-serde"] + path = vendor/nim-serde + url = https://github.com/codex-storage/nim-serde.git +[submodule "vendor/nim-leveldbstatic"] + path = vendor/nim-leveldbstatic + url = https://github.com/codex-storage/nim-leveldb.git diff --git a/BUILDING.md b/BUILDING.md index d97e5400..525fa160 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -18,45 +18,57 @@ To build nim-codex, developer tools need to be installed and accessible in the OS. 
-Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/status-im/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work. +Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/codex-storage/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work. Other approaches may be viable. On macOS, some users may prefer [MacPorts](https://www.macports.org/) to [Homebrew](https://brew.sh/). On Windows, rather than use MSYS2, some users may prefer to install developer tools with [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/), [Scoop](https://scoop.sh/), or [Chocolatey](https://chocolatey.org/), or download installers for e.g. Make and CMake while otherwise relying on official Windows developer tools. Community contributions to these docs and our build system are welcome! +### Rust + +The current implementation of Codex's zero-knowledge proving circuit requires the installation of rust v1.76.0 or greater. Be sure to install it for your OS and add it to your terminal's path such that the command `cargo --version` gives a compatible version. + ### Linux *Package manager commands may require `sudo` depending on OS setup.* On a bare bones installation of Debian (or a distribution derived from Debian, such as Ubuntu), run -```text -$ apt-get update && apt-get install build-essential cmake curl git +```shell +apt-get update && apt-get install build-essential cmake curl git rustc cargo ``` Non-Debian distributions have different package managers: `apk`, `dnf`, `pacman`, `rpm`, `yum`, etc. 
For example, on a bare bones installation of Fedora, run -```text -$ dnf install @development-tools cmake gcc-c++ which +```shell +dnf install @development-tools cmake gcc-c++ rust cargo +``` + +In case your distribution does not provide required Rust version, we may install it using [rustup](https://www.rust-lang.org/tools/install) +```shell +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=1.76.0 -y + +. "$HOME/.cargo/env" ``` ### macOS Install the [Xcode Command Line Tools](https://mac.install.guide/commandlinetools/index.html) by opening a terminal and running -```text -$ xcode-select --install +```shell +xcode-select --install ``` Install [Homebrew (`brew`)](https://brew.sh/) and in a new terminal run -```text -$ brew install bash cmake +```shell +brew install bash cmake rust ``` Check that `PATH` is setup correctly -```text -$ which bash cmake -/usr/local/bin/bash -/usr/local/bin/cmake +```shell +which bash cmake + +# /usr/local/bin/bash +# /usr/local/bin/cmake ``` ### Windows + MSYS2 @@ -68,14 +80,40 @@ Download and run the installer from [msys2.org](https://www.msys2.org/). Launch an MSYS2 [environment](https://www.msys2.org/docs/environments/). UCRT64 is generally recommended: from the Windows *Start menu* select `MSYS2 MinGW UCRT x64`. Assuming a UCRT64 environment, in Bash run -```text -$ pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake +```shell +pacman -Suy +pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-rust ``` +#### Optional: VSCode Terminal integration + +You can link the MSYS2-UCRT64 terminal into VSCode by modifying the configuration file as shown below. +File: `C:/Users//AppData/Roaming/Code/User/settings.json` +```json +{ + ... + "terminal.integrated.profiles.windows": { + ... 
+ "MSYS2-UCRT64": { + "path": "C:\\msys64\\usr\\bin\\bash.exe", + "args": [ + "--login", + "-i" + ], + "env": { + "MSYSTEM": "UCRT64", + "CHERE_INVOKING": "1", + "MSYS2_PATH_TYPE": "inherit" + } + } + } +} +``` + ### Other It is possible that nim-codex can be built and run on other platforms supported by the [Nim](https://nim-lang.org/) language: BSD family, older versions of Windows, etc. There has not been sufficient experimentation with nim-codex on such platforms, so instructions are not provided. Community contributions to these docs and our build system are welcome! @@ -83,30 +121,30 @@ It is possible that nim-codex can be built and run on other platforms supported ## Repository In Bash run -```text -$ git clone https://github.com/status-im/nim-codex.git repos/nim-codex && cd repos/nim-codex +```shell +git clone https://github.com/codex-storage/nim-codex.git repos/nim-codex && cd repos/nim-codex ``` -nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system#readme), so next run -```text -$ make update +nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system), so next run +```shell +make update ``` This step can take a while to complete because by default it builds the [Nim compiler](https://nim-lang.org/docs/nimc.html). To see more output from `make` pass `V=1`. This works for all `make` targets in projects using the nimbus-build-system -```text -$ make V=1 update +```shell +make V=1 update ``` ## Executable In Bash run -```text -$ make exec +```shell +make ``` -The `exec` target creates the `build/codex` executable. +The default `make` target creates the `build/codex` executable. ## Example usage @@ -115,29 +153,40 @@ See the [instructions](README.md#cli-options) in the main readme. ## Tests In Bash run -```text -$ make test +```shell +make test ``` ### testAll +#### Prerequisites + +To run the integration tests, an Ethereum test node is required. Follow these instructions to set it up. 
+ +##### Windows (do this before 'All platforms') + +1. Download and install Visual Studio 2017 or newer. (Not VSCode!) In the Workloads overview, enable `Desktop development with C++`. ( https://visualstudio.microsoft.com ) + +##### All platforms + +1. Install NodeJS (tested with v18.14.0), consider using NVM as a version manager. [Node Version Manager (`nvm`)](https://github.com/nvm-sh/nvm#readme) +1. Open a terminal +1. Go to the vendor/codex-contracts-eth folder: `cd //vendor/codex-contracts-eth/` +1. `npm install` -> Should complete with the number of packages added and an overview of known vulnerabilities. +1. `npm test` -> Should output test results. May take a minute. + +Before the integration tests are started, you must start the Ethereum test node manually. +1. Open a terminal +1. Go to the vendor/codex-contracts-eth folder: `cd //vendor/codex-contracts-eth/` +1. `npm start` -> This should launch Hardhat, and output a number of keys and a warning message. + +#### Run + The `testAll` target runs the same tests as `make test` and also runs tests for nim-codex's Ethereum contracts, as well a basic suite of integration tests. -To run `make testAll`, Node.js needs to be installed. [Node Version Manager (`nvm`)](https://github.com/nvm-sh/nvm#readme) is a flexible means to do that and it works on Linux, macOS, and Windows + MSYS2. +To run `make testAll`. -With `nvm` installed, launch a separate terminal and download the latest LTS version of Node.js -```text -$ nvm install --lts -``` - -In that same terminal run -```text -$ cd repos/nim-codex/vendor/dagger-contracts && npm install && npm start -``` - -Those commands install and launch a [Hardhat](https://hardhat.org/) environment with nim-codex's Ethereum contracts. 
- -In the other terminal run -```text -$ make testAll +Use a new terminal to run: +```shell +make testAll ``` diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 00000000..1f70e331 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 Codex Storage + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE-APACHEv2 b/LICENSE-APACHEv2 new file mode 100644 index 00000000..fdfb0bd3 --- /dev/null +++ b/LICENSE-APACHEv2 @@ -0,0 +1,199 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 00000000..d13cc4b2 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile index 7200eb44..54ac04d2 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,30 @@ # at your option. This file may not be copied, modified, or distributed except # according to those terms. 
+# This is the Nim version used locally and in regular CI builds. +# Can be a specific version tag, a branch name, or a commit hash. +# Can be overridden by setting the NIM_COMMIT environment variable +# before calling make. +# +# For readability in CI, if NIM_COMMIT is set to "pinned", +# this will also default to the version pinned here. +# +# If NIM_COMMIT is set to "nimbusbuild", this will use the +# version pinned by nimbus-build-system. +PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21 + +ifeq ($(NIM_COMMIT),) +NIM_COMMIT := $(PINNED_NIM_VERSION) +else ifeq ($(NIM_COMMIT),pinned) +NIM_COMMIT := $(PINNED_NIM_VERSION) +endif + +ifeq ($(NIM_COMMIT),nimbusbuild) +undefine NIM_COMMIT +else +export NIM_COMMIT +endif + SHELL := bash # the shell used internally by Make # used inside the included makefiles @@ -44,7 +68,11 @@ GIT_SUBMODULE_UPDATE := git submodule update --init --recursive else # "variables.mk" was included. Business as usual until the end of this file. # default target, because it's the first one that doesn't start with '.' -all: | test + +# Builds the codex binary +all: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims # must be included after the default target -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk @@ -56,15 +84,12 @@ else NIM_PARAMS := $(NIM_PARAMS) -d:release endif -deps: | deps-common nat-libs codex.nims +deps: | deps-common nat-libs ifneq ($(USE_LIBBACKTRACE), 0) deps: | libbacktrace endif -#- deletes and recreates "codex.nims" which on Windows is a copy instead of a proper symlink update: | update-common - rm -rf codex.nims && \ - $(MAKE) codex.nims $(HANDLE_OUTPUT) # detecting the os ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10... 
@@ -79,31 +104,27 @@ endif # Builds and run a part of the test suite test: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim test $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims # Builds and runs the smart contract tests testContracts: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims # Builds and runs the integration tests testIntegration: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims -# Builds and runs all tests +# Builds and runs all tests (except for Taiko L2 tests) testAll: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim testAll $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims -# Builds the codex binary -exec: | build deps +# Builds and runs Taiko L2 tests +testTaiko: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim codex $(NIM_PARAMS) codex.nims - -# symlink -codex.nims: - ln -s codex.nimble $@ + $(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) codex.nims # nim-libbacktrace LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 @@ -128,8 +149,15 @@ coverage: shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info echo -e $(BUILD_MSG) "coverage/report/index.html" genhtml coverage/coverage.f.info --output-directory coverage/report + +show-coverage: if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." 
&& open coverage/report/index.html) || true; fi +coverage-script: build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim coverage $(NIM_PARAMS) build.nims + echo "Run `make show-coverage` to view coverage results" + # usual cleaning clean: | clean-common rm -rf build diff --git a/README.md b/README.md index 9f39cf12..22cbe219 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,11 @@ [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability) -[![CI](https://github.com/status-im/nim-codex/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/status-im/nim-codex/actions?query=workflow%3ACI+branch%3Amain) -[![Codecov](https://codecov.io/gh/status-im/nim-codex/branch/main/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/status-im/nim-codex) +[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster) +[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster) +[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex) [![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ) +![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex) ## Build and Run @@ -19,7 +21,7 @@ For detailed instructions on preparing to build nim-codex see [*Building Codex*] To build the project, clone it and run: ```bash -make update && make exec +make update && make ``` 
The executable will be placed under the `build` directory under the project root. @@ -29,6 +31,35 @@ Run the client with: ```bash build/codex ``` +## Configuration + +It is possible to configure a Codex node in several ways: + 1. CLI options + 2. Env. variable + 3. Config + +The order of priority is the same as above: Cli arguments > Env variables > Config file values. + +### Environment variables + +In order to set a configuration option using environment variables, first find the desired CLI option +and then transform it in the following way: + + 1. prepend it with `CODEX_` + 2. make it uppercase + 3. replace `-` with `_` + +For example, to configure `--log-level`, use `CODEX_LOG_LEVEL` as the environment variable name. + +### Configuration file + +A [TOML](https://toml.io/en/) configuration file can also be used to set configuration values. Configuration option names and corresponding values are placed in the file, separated by `=`. Configuration option names can be obtained from the `codex --help` command, and should not include the `--` prefix. For example, a node's log level (`--log-level`) can be configured using TOML as follows: + +```toml +log-level = "trace" +``` + +The Codex node can then read the configuration from this file using the `--config-file` CLI parameter, like `codex --config-file=/path/to/your/config.toml`. ### CLI Options @@ -40,104 +71,77 @@ codex [OPTIONS]... command The following options are available: - --log-level Sets the log level [=LogLevel.INFO]. + --config-file Loads the configuration from a TOML file [=none]. + --log-level Sets the log level [=info]. --metrics Enable the metrics server [=false]. --metrics-address Listening address of the metrics server [=127.0.0.1]. --metrics-port Listening HTTP port of the metrics server [=8008]. - -d, --data-dir The directory where codex will store configuration and data.. - -l, --listen-port Specifies one or more listening ports for the node to listen on. [=0]. 
- -i, --listen-ip The public IP [=0.0.0.0]. - --udp-port Specify the discovery (UDP) port [=8090]. - --net-privkey Source of network (secp256k1) private key file (random|) [=random]. - -b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting to the network.. + -d, --data-dir The directory where codex will store configuration and data. + -i, --listen-addrs Multi Addresses to listen on [=/ip4/0.0.0.0/tcp/0]. + -a, --nat IP Addresses to announce behind a NAT [=127.0.0.1]. + -e, --disc-ip Discovery listen address [=0.0.0.0]. + -u, --disc-port Discovery (UDP) port [=8090]. + --net-privkey Source of network (secp256k1) private key file path or name [=key]. + -b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting to the network. --max-peers The maximum number of peers to connect to [=160]. --agent-string Node agent string which is used as identifier in network [=Codex]. + --api-bindaddr The REST API bind address [=127.0.0.1]. -p, --api-port The REST Api port [=8080]. - -c, --cache-size The size in MiB of the block cache, 0 disables the cache [=100]. - --persistence Enables persistence mechanism, requires an Ethereum node [=false]. - --eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545]. - --eth-account The Ethereum account that is used for storage contracts [=EthAddress.none]. - --eth-deployment The json file describing the contract deployment [=string.none]. + --repo-kind Backend for main repo store (fs, sqlite) [=fs]. + -q, --storage-quota The size of the total storage quota dedicated to the node [=8589934592]. + -t, --block-ttl Default block timeout in seconds - 0 disables the ttl [=$DefaultBlockTtl]. + --block-mi Time interval in seconds - determines frequency of block maintenance cycle: how + often blocks are checked for expiration and cleanup + [=$DefaultBlockMaintenanceInterval]. + --block-mn Number of blocks to check every maintenance cycle [=1000]. 
+ -c, --cache-size The size of the block cache, 0 disables the cache - might help on slow hardrives + [=0]. Available sub-commands: -codex initNode +codex persistence [OPTIONS]... command + +The following options are available: + + --eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545]. + --eth-account The Ethereum account that is used for storage contracts. + --eth-private-key File containing Ethereum private key for storage contracts. + --marketplace-address Address of deployed Marketplace contract. + --validator Enables validator, requires an Ethereum node [=false]. + --validator-max-slots Maximum number of slots that the validator monitors [=1000]. + +Available sub-commands: + +codex persistence prover [OPTIONS]... + +The following options are available: + + --circom-r1cs The r1cs file for the storage circuit. + --circom-wasm The wasm file for the storage circuit. + --circom-zkey The zkey file for the storage circuit. + --circom-no-zkey Ignore the zkey file - use only for testing! [=false]. + --proof-samples Number of samples to prove [=5]. + --max-slot-depth The maximum depth of the slot tree [=32]. + --max-dataset-depth The maximum depth of the dataset tree [=8]. + --max-block-depth The maximum depth of the network block merkle tree [=5]. + --max-cell-elements The maximum number of elements in a cell [=67]. ``` -### Example: running two Codex clients +#### Logging -```bash -build/codex --data-dir="$(pwd)/Codex1" -i=127.0.0.1 -``` +Codex uses [Chronicles](https://github.com/status-im/nim-chronicles) logging library, which allows great flexibility in working with logs. +Chronicles has the concept of topics, which categorize log entries into semantic groups. -This will start codex with a data directory pointing to `Codex1` under the current execution directory and announce itself on the DHT under `127.0.0.1`. 
+Using the `log-level` parameter, you can set the top-level log level like `--log-level="trace"`, but more importantly, +you can set log levels for specific topics like `--log-level="info; trace: marketplace,node; error: blockexchange"`, +which sets the top-level log level to `info` and then for topics `marketplace` and `node` sets the level to `trace` and so on. -To run a second client that automatically discovers nodes on the network, we need to get the Signed Peer Record (SPR) of first client, Client1. We can do this by querying the `/info` endpoint of the node's REST API. +### Guides -`curl http://127.0.0.1:8080/api/codex/v1/info` +To get acquainted with Codex, consider: +* running the simple [Codex Two-Client Test](docs/TwoClientTest.md) for a start, and; +* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](docs/Marketplace.md) using a local blockchain as well. -This should output information about Client1, including its PeerID, TCP/UDP addresses, data directory, and SPR: +## API -```json -{ - "id": "16Uiu2HAm92LGXYTuhtLaZzkFnsCx6FFJsNmswK6o9oPXFbSKHQEa", - "addrs": [ - "/ip4/0.0.0.0/udp/8090", - "/ip4/0.0.0.0/tcp/49336" - ], - "repo": "/repos/status-im/nim-codex/Codex1", - "spr": "spr:CiUIAhIhAmqg5fVU2yxPStLdUOWgwrkWZMHW2MHf6i6l8IjA4tssEgIDARpICicAJQgCEiECaqDl9VTbLE9K0t1Q5aDCuRZkwdbYwd_qLqXwiMDi2ywQ5v2VlAYaCwoJBH8AAAGRAh-aGgoKCAR_AAABBts3KkcwRQIhAPOKl38CviplVbMVnA_9q3N1K_nk5oGuNp7DWeOqiJzzAiATQ2acPyQvPxLU9YS-TiVo4RUXndRcwMFMX2Yjhw8k3A" -} -``` - -Now, let's start a second client, Client2. Because we're already using the default ports TCP (:8080) and UDP (:8090) for the first client, we have to specify new ports to avoid a collision. Additionally, we can specify the SPR from Client1 as the bootstrap node for discovery purposes, allowing Client2 to determine where content is located in the network. 
- -```bash -build/codex --data-dir="$(pwd)/Codex2" -i=127.0.0.1 --api-port=8081 --udp-port=8091 --bootstrap-node=spr:CiUIAhIhAmqg5fVU2yxPStLdUOWgwrkWZMHW2MHf6i6l8IjA4tssEgIDARpICicAJQgCEiECaqDl9VTbLE9K0t1Q5aDCuRZkwdbYwd_qLqXwiMDi2ywQ5v2VlAYaCwoJBH8AAAGRAh-aGgoKCAR_AAABBts3KkcwRQIhAPOKl38CviplVbMVnA_9q3N1K_nk5oGuNp7DWeOqiJzzAiATQ2acPyQvPxLU9YS-TiVo4RUXndRcwMFMX2Yjhw8k3A -``` - -There are now two clients running. We could upload a file to Client1 and download that file (given its CID) using Client2, by using the clients' REST API. - -## Interacting with the client - -The client exposes a REST API that can be used to interact with the clients. These commands could be invoked with any HTTP client, however the following endpoints assume the use of the `curl` command. - -### `/api/codex/v1/connect/{peerId}` - -Connect to a peer identified by its peer id. Takes an optional `addrs` parameter with a list of valid [multiaddresses](https://multiformats.io/multiaddr/). If `addrs` is absent, the peer will be discovered over the DHT. - -Example: - -```bash -curl "127.0.0.1:8080/api/codex/v1/connect/?addrs=" -``` - -### `/api/codex/v1/download/{id}` - -Download data identified by a `Cid`. - -Example: - -```bash - curl -vvv "127.0.0.1:8080/api/codex/v1/download/" --output - ``` - -### `/api/codex/v1/upload` - -Upload a file, upon success returns the `Cid` of the uploaded file. - -Example: - -```bash -curl -vvv -H "content-type: application/octet-stream" -H Expect: -T "" "127.0.0.1:8080/api/codex/v1/upload" -X POST -``` - -### `/api/codex/v1/info` - -Get useful node info such as its peer id, address and SPR. - -Example: - -```bash -curl -vvv "127.0.0.1:8080/api/codex/v1/info" -``` +The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage). 
diff --git a/benchmarks/.gitignore b/benchmarks/.gitignore new file mode 100644 index 00000000..6f697152 --- /dev/null +++ b/benchmarks/.gitignore @@ -0,0 +1,2 @@ +ceremony +circuit_bench_* diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 00000000..0cff64e9 --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,33 @@ + +## Benchmark Runner + +Modify `runAllBenchmarks` proc in `run_benchmarks.nim` to the desired parameters and variations. + +Then run it: + +```sh +nim c -r run_benchmarks +``` + +By default all circuit files for each combinations of circuit args will be generated in a unique folder named like: + nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3 + +Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed. + +You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suite your needs. See `create_circuits.nim` for their definition. + +The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this. + +## Codex Ark Circom CLI + +Runs Codex's prover setup with Ark / Circom. + +Compile: +```sh +nim c codex_ark_prover_cli.nim +``` + +Run to see usage: +```sh +./codex_ark_prover_cli.nim -h +``` diff --git a/benchmarks/config.nims b/benchmarks/config.nims new file mode 100644 index 00000000..c5c2c5dc --- /dev/null +++ b/benchmarks/config.nims @@ -0,0 +1,15 @@ +--path: + ".." 
+--path: + "../tests" +--threads: + on +--tlsEmulation: + off +--d: + release + +# when not defined(chronicles_log_level): +# --define:"chronicles_log_level:NONE" # compile all log statements +# --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime +# --"import":"logging" # ensure that logging is ignored at runtime diff --git a/benchmarks/create_circuits.nim b/benchmarks/create_circuits.nim new file mode 100644 index 00000000..911dcd51 --- /dev/null +++ b/benchmarks/create_circuits.nim @@ -0,0 +1,187 @@ +import std/[hashes, json, strutils, strformat, os, osproc, uri] + +import ./utils + +type + CircuitEnv* = object + nimCircuitCli*: string + circuitDirIncludes*: string + ptauPath*: string + ptauUrl*: Uri + codexProjDir*: string + + CircuitArgs* = object + depth*: int + maxslots*: int + cellsize*: int + blocksize*: int + nsamples*: int + entropy*: int + seed*: int + nslots*: int + ncells*: int + index*: int + +proc findCodexProjectDir(): string = + ## find codex proj dir -- assumes this script is in codex/benchmarks + result = currentSourcePath().parentDir.parentDir + +func default*(tp: typedesc[CircuitEnv]): CircuitEnv = + let codexDir = findCodexProjectDir() + result.nimCircuitCli = + codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" / + "proof_input" / "cli" + result.circuitDirIncludes = + codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit" + result.ptauPath = + codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau" + result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri + result.codexProjDir = codexDir + +proc check*(env: var CircuitEnv) = + ## check that the CWD of script is in the codex parent + let codexProjDir = findCodexProjectDir() + echo "\n\nFound project dir: ", codexProjDir + + let snarkjs = findExe("snarkjs") + if snarkjs == "": + echo dedent""" + ERROR: must install snarkjs first + + npm install -g snarkjs@latest + """ + + let circom = 
findExe("circom") + if circom == "": + echo dedent""" + ERROR: must install circom first + + git clone https://github.com/iden3/circom.git + cargo install --path circom + """ + + if snarkjs == "" or circom == "": + quit 2 + + echo "Found SnarkJS: ", snarkjs + echo "Found Circom: ", circom + + if not env.nimCircuitCli.fileExists: + echo "Nim Circuit reference cli not found: ", env.nimCircuitCli + echo "Building Circuit reference cli...\n" + withDir env.nimCircuitCli.parentDir: + runit "nimble build -d:release --styleCheck:off cli" + echo "CWD: ", getCurrentDir() + assert env.nimCircuitCli.fileExists() + + echo "Found NimCircuitCli: ", env.nimCircuitCli + echo "Found Circuit Path: ", env.circuitDirIncludes + echo "Found PTAU file: ", env.ptauPath + +proc downloadPtau*(ptauPath: string, ptauUrl: Uri) = + ## download ptau file using curl if needed + if not ptauPath.fileExists: + echo "Ceremony file not found, downloading..." + createDir ptauPath.parentDir + withDir ptauPath.parentDir: + runit fmt"curl --output '{ptauPath}' '{$ptauUrl}/{ptauPath.splitPath().tail}'" + else: + echo "Found PTAU file at: ", ptauPath + +proc getCircuitBenchStr*(args: CircuitArgs): string = + for f, v in fieldPairs(args): + result &= "_" & f & $v + +proc getCircuitBenchPath*(args: CircuitArgs, env: CircuitEnv): string = + ## generate folder name for unique circuit args + result = env.codexProjDir / "benchmarks/circuit_bench" & getCircuitBenchStr(args) + +proc generateCircomAndSamples*(args: CircuitArgs, env: CircuitEnv, name: string) = + ## run nim circuit and sample generator + var cliCmd = env.nimCircuitCli + for f, v in fieldPairs(args): + cliCmd &= " --" & f & "=" & $v + + if not "input.json".fileExists: + echo "Generating Circom Files..." 
+ runit fmt"{cliCmd} -v --circom={name}.circom --output=input.json" + +proc createCircuit*( + args: CircuitArgs, + env: CircuitEnv, + name = "proof_main", + circBenchDir = getCircuitBenchPath(args, env), + someEntropy = "some_entropy_75289v3b7rcawcsyiur", + doGenerateWitness = false, +): tuple[dir: string, name: string] = + ## Generates all the files needed for to run a proof circuit. Downloads the PTAU file if needed. + ## + ## All needed circuit files will be generated as needed. + ## They will be located in `circBenchDir` which defaults to a folder like: + ## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3` + ## with all the given CircuitArgs. + ## + let circdir = circBenchDir + + downloadPtau env.ptauPath, env.ptauUrl + + echo "Creating circuit dir: ", circdir + createDir circdir + withDir circdir: + writeFile("circuit_params.json", pretty(%*args)) + let + inputs = circdir / "input.json" + zkey = circdir / fmt"{name}.zkey" + wasm = circdir / fmt"{name}.wasm" + r1cs = circdir / fmt"{name}.r1cs" + wtns = circdir / fmt"{name}.wtns" + + generateCircomAndSamples(args, env, name) + + if not wasm.fileExists or not r1cs.fileExists: + runit fmt"circom --r1cs --wasm --O2 -l{env.circuitDirIncludes} {name}.circom" + moveFile fmt"{name}_js" / fmt"{name}.wasm", fmt"{name}.wasm" + echo "Found wasm: ", wasm + echo "Found r1cs: ", r1cs + + if not zkey.fileExists: + echo "ZKey not found, generating..." 
+ putEnv "NODE_OPTIONS", "--max-old-space-size=8192" + if not fmt"{name}_0000.zkey".fileExists: + runit fmt"snarkjs groth16 setup {r1cs} {env.ptauPath} {name}_0000.zkey" + echo fmt"Generated {name}_0000.zkey" + + let cmd = + fmt"snarkjs zkey contribute {name}_0000.zkey {name}_0001.zkey --name='1st Contributor Name'" + echo "CMD: ", cmd + let cmdRes = execCmdEx(cmd, options = {}, input = someEntropy & "\n") + assert cmdRes.exitCode == 0 + + moveFile fmt"{name}_0001.zkey", fmt"{name}.zkey" + removeFile fmt"{name}_0000.zkey" + + if not wtns.fileExists and doGenerateWitness: + runit fmt"node generate_witness.js {wtns} ../input.json ../witness.wtns" + + return (circdir, name) + +when isMainModule: + echo "findCodexProjectDir: ", findCodexProjectDir() + ## test run creating a circuit + var env = CircuitEnv.default() + env.check() + + let args = CircuitArgs( + depth: 32, # maximum depth of the slot tree + maxslots: 256, # maximum number of slots + cellsize: 2048, # cell size in bytes + blocksize: 65536, # block size in bytes + nsamples: 5, # number of samples to prove + entropy: 1234567, # external randomness + seed: 12345, # seed for creating fake data + nslots: 11, # number of slots in the dataset + index: 3, # which slot we prove (0..NSLOTS-1) + ncells: 512, # number of cells in this slot + ) + let benchenv = createCircuit(args, env) + echo "\nBench dir:\n", benchenv diff --git a/benchmarks/run_benchmarks.nim b/benchmarks/run_benchmarks.nim new file mode 100644 index 00000000..f69c13e0 --- /dev/null +++ b/benchmarks/run_benchmarks.nim @@ -0,0 +1,105 @@ +import std/[sequtils, strformat, os, options, importutils] +import std/[times, os, strutils, terminal] + +import pkg/questionable +import pkg/questionable/results +import pkg/datastore + +import pkg/codex/[rng, stores, merkletree, codextypes, slots] +import pkg/codex/utils/[json, poseidon2digest] +import pkg/codex/slots/[builder, sampler/utils, backends/helpers] +import pkg/constantine/math/[arithmetic, io/io_bigints, 
io/io_fields] + +import ./utils +import ./create_circuits + +type CircuitFiles* = object + r1cs*: string + wasm*: string + zkey*: string + inputs*: string + +proc runArkCircom(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) = + echo "Loading sample proof..." + var + inputData = files.inputs.readFile() + inputJson = !JsonNode.parse(inputData) + proofInputs = Poseidon2Hash.jsonToProofInput(inputJson) + circom = CircomCompat.init( + files.r1cs, + files.wasm, + files.zkey, + slotDepth = args.depth, + numSamples = args.nsamples, + ) + defer: + circom.release() # this comes from the rust FFI + + echo "Sample proof loaded..." + echo "Proving..." + + let nameArgs = getCircuitBenchStr(args) + var proof: CircomProof + benchmark fmt"prover-{nameArgs}", benchmarkLoops: + proof = circom.prove(proofInputs).tryGet + + var verRes: bool + benchmark fmt"verify-{nameArgs}", benchmarkLoops: + verRes = circom.verify(proof, proofInputs).tryGet + echo "verify result: ", verRes + +proc runRapidSnark(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) = + # time rapidsnark ${CIRCUIT_MAIN}.zkey witness.wtns proof.json public.json + + echo "generating the witness..." 
+ ## TODO + +proc runBenchmark(args: CircuitArgs, env: CircuitEnv, benchmarkLoops: int) = + ## execute benchmarks given a set of args + ## will create a folder in `benchmarks/circuit_bench_$(args)` + ## + + let env = createCircuit(args, env) + + ## TODO: copy over testcircomcompat proving + let files = CircuitFiles( + r1cs: env.dir / fmt"{env.name}.r1cs", + wasm: env.dir / fmt"{env.name}.wasm", + zkey: env.dir / fmt"{env.name}.zkey", + inputs: env.dir / fmt"input.json", + ) + + runArkCircom(args, files, benchmarkLoops) + +proc runAllBenchmarks*() = + echo "Running benchmark" + # setup() + var env = CircuitEnv.default() + env.check() + + var args = CircuitArgs( + depth: 32, # maximum depth of the slot tree + maxslots: 256, # maximum number of slots + cellsize: 2048, # cell size in bytes + blocksize: 65536, # block size in bytes + nsamples: 1, # number of samples to prove + entropy: 1234567, # external randomness + seed: 12345, # seed for creating fake data + nslots: 11, # number of slots in the dataset + index: 3, # which slot we prove (0..NSLOTS-1) + ncells: 512, # number of cells in this slot + ) + + let + numberSamples = 3 + benchmarkLoops = 5 + + for i in 1 .. 
numberSamples: + args.nsamples = i + stdout.styledWriteLine(fgYellow, "\nbenchmarking args: ", $args) + runBenchmark(args, env, benchmarkLoops) + + printBenchMarkSummaries() + +when isMainModule: + runAllBenchmarks() diff --git a/benchmarks/utils.nim b/benchmarks/utils.nim new file mode 100644 index 00000000..af5cdc25 --- /dev/null +++ b/benchmarks/utils.nim @@ -0,0 +1,76 @@ +import std/tables + +template withDir*(dir: string, blk: untyped) = + ## set working dir for duration of blk + let prev = getCurrentDir() + try: + setCurrentDir(dir) + `blk` + finally: + setCurrentDir(prev) + +template runit*(cmd: string) = + ## run shell commands and verify it runs without an error code + echo "RUNNING: ", cmd + let cmdRes = execShellCmd(cmd) + echo "STATUS: ", cmdRes + assert cmdRes == 0 + +var benchRuns* = newTable[string, tuple[avgTimeSec: float, count: int]]() + +func avg(vals: openArray[float]): float = + for v in vals: + result += v / vals.len().toFloat() + +template benchmark*(name: untyped, count: int, blk: untyped) = + let benchmarkName: string = name + ## simple benchmarking of a block of code + var runs = newSeqOfCap[float](count) + for i in 1 .. 
count: + block: + let t0 = epochTime() + `blk` + let elapsed = epochTime() - t0 + runs.add elapsed + + var elapsedStr = "" + for v in runs: + elapsedStr &= ", " & v.formatFloat(format = ffDecimal, precision = 3) + stdout.styledWriteLine( + fgGreen, "CPU Time [", benchmarkName, "] ", "avg(", $count, "): ", elapsedStr, " s" + ) + benchRuns[benchmarkName] = (runs.avg(), count) + +template printBenchMarkSummaries*(printRegular=true, printTsv=true) = + if printRegular: + echo "" + for k, v in benchRuns: + echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k + + if printTsv: + echo "" + echo "name", "\t", "avgTimeSec", "\t", "count" + for k, v in benchRuns: + echo k, "\t", v.avgTimeSec, "\t", v.count + + +import std/math + +func floorLog2*(x: int): int = + var k = -1 + var y = x + while (y > 0): + k += 1 + y = y shr 1 + return k + +func ceilingLog2*(x: int): int = + if (x == 0): + return -1 + else: + return (floorLog2(x - 1) + 1) + +func checkPowerOfTwo*(x: int, what: string): int = + let k = ceilingLog2(x) + assert(x == 2 ^ k, ("`" & what & "` is expected to be a power of 2")) + return x diff --git a/build.nims b/build.nims new file mode 100644 index 00000000..33d7b623 --- /dev/null +++ b/build.nims @@ -0,0 +1,91 @@ +mode = ScriptMode.Verbose + + +### Helper functions +proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + when compiles(commandLineParams): + for param in commandLineParams(): + extra_params &= " " & param + else: + for i in 2..= 1.2.0", - "asynctest >= 0.3.2 & < 0.4.0", - "bearssl >= 0.1.4", - "chronicles >= 0.7.2", - "chronos >= 2.5.2", - "confutils", - "ethers >= 0.2.0 & < 0.3.0", - "libbacktrace", - "libp2p", - "metrics", - "nimcrypto >= 0.4.1", - "nitro >= 0.5.1 & < 0.6.0", - "presto", - "protobuf_serialization >= 0.2.0 & < 0.3.0", - "questionable >= 
0.10.6 & < 0.11.0", - "secp256k1", - "stew", - "upraises >= 0.1.0 & < 0.2.0", - "lrucache", - "leopard >= 0.1.0 & < 0.2.0", - "blscurve", - "libp2pdht", - "eth" - -when declared(namedBin): - namedBin = { - "codex/codex": "codex" - }.toTable() - -### Helper functions -proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = - if not dirExists "build": - mkDir "build" - # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" - var extra_params = params - when compiles(commandLineParams): - for param in commandLineParams: - extra_params &= " " & param - else: - for i in 2.. 0: + peers[hash(address) mod peers.len].some + elif b.peers.len > 0: + toSeq(b.peers)[hash(address) mod b.peers.len].some + else: + BlockExcPeerCtx.none + + if peer =? maybePeer: + asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id) + b.pendingBlocks.setInFlight(address) + await b.sendWantBlock(address, peer) + codex_block_exchange_want_block_lists_sent.inc() + await b.sendWantHave(address, @[peer], toSeq(b.peers)) + codex_block_exchange_want_have_lists_sent.inc() + + # Don't let timeouts bubble up. We can't be too broad here or we break + # cancellations. + try: + success await blockFuture + except AsyncTimeoutError as err: + failure err + +proc requestBlock*( + b: BlockExcEngine, + cid: Cid +): Future[?!Block] = + b.requestBlock(BlockAddress.init(cid)) proc blockPresenceHandler*( b: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]) {.async.} = - ## Handle block presence - ## - - trace "Received presence update for peer", peer, blocks = blocks.len let peerCtx = b.peers.get(peer) wantList = toSeq(b.pendingBlocks.wantList) @@ -211,12 +224,6 @@ proc blockPresenceHandler*( for blk in blocks: if presence =? 
Presence.init(blk): - logScope: - cid = presence.cid - have = presence.have - price = presence.price - - trace "Updating precense" peerCtx.setPresence(presence) let @@ -226,166 +233,237 @@ proc blockPresenceHandler*( ) if dontWantCids.len > 0: - trace "Cleaning peer haves", peer, count = dontWantCids.len peerCtx.cleanPresence(dontWantCids) - trace "Peer want/have", items = peerHave.len, wantList = wantList.len let wantCids = wantList.filterIt( it in peerHave ) if wantCids.len > 0: - trace "Getting blocks based on updated precense", peer, count = wantCids.len + trace "Peer has blocks in our wantList", peer, wantCount = wantCids.len discard await allFinished( - wantCids.mapIt(b.requestBlock(it))) - trace "Requested blocks based on updated precense", peer, count = wantCids.len + wantCids.mapIt(b.sendWantBlock(it, peerCtx))) # if none of the connected peers report our wants in their have list, # fire up discovery b.discovery.queueFindBlocksReq( - toSeq(b.pendingBlocks.wantList) + toSeq(b.pendingBlocks.wantListCids) .filter do(cid: Cid) -> bool: - not b.peers.anyIt( cid in it.peerHave )) - -proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} = - trace "Schedule a task for new blocks", items = blocks.len + not b.peers.anyIt( cid in it.peerHaveCids )) +proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = let - cids = blocks.mapIt( it.cid ) + cids = blocksDelivery.mapIt( it.blk.cid ) # schedule any new peers to provide blocks to for p in b.peers: for c in cids: # for each cid # schedule a peer if it wants at least one cid # and we have it in our local store - if c in p.peerWants: + if c in p.peerWantsCids: if await (c in b.localStore): if b.scheduleTask(p): trace "Task scheduled for peer", peer = p.id else: - trace "Unable to schedule task for peer", peer = p.id + warn "Unable to schedule task for peer", peer = p.id break # do next peer -proc resolveBlocks*(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} = - ## 
Resolve pending blocks from the pending blocks manager - ## and schedule any new task to be ran - ## +proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = + ## Tells neighboring peers that we're no longer interested in a block. + trace "Sending block request cancellations to peers", addrs = addrs.len - trace "Resolving blocks", blocks = blocks.len + let failed = (await allFinished( + b.peers.mapIt( + b.network.request.sendWantCancellations( + peer = it.id, + addresses = addrs)))) + .filterIt(it.failed) - b.pendingBlocks.resolve(blocks) - await b.scheduleTasks(blocks) - b.discovery.queueProvideBlocksReq(blocks.mapIt( it.cid )) + if failed.len > 0: + warn "Failed to send block request cancellations to peers", peers = failed.len + +proc getAnnouceCids(blocksDelivery: seq[BlockDelivery]): seq[Cid] = + var cids = initHashSet[Cid]() + for bd in blocksDelivery: + if bd.address.leaf: + cids.incl(bd.address.treeCid) + else: + without isM =? bd.address.cid.isManifest, err: + warn "Unable to determine if cid is manifest" + continue + if isM: + cids.incl(bd.address.cid) + return cids.toSeq + +proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = + b.pendingBlocks.resolve(blocksDelivery) + await b.scheduleTasks(blocksDelivery) + let announceCids = getAnnouceCids(blocksDelivery) + await b.cancelBlocks(blocksDelivery.mapIt(it.address)) + + b.discovery.queueProvideBlocksReq(announceCids) + +proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} = + await b.resolveBlocks( + blocks.mapIt( + BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid) + ))) proc payForBlocks(engine: BlockExcEngine, peer: BlockExcPeerCtx, - blocks: seq[bt.Block]) {.async.} = - trace "Paying for blocks", blocks = blocks.len - + blocksDelivery: seq[BlockDelivery]) {.async.} = let sendPayment = engine.network.request.sendPayment - price = peer.price(blocks.mapIt(it.cid)) + price = peer.price(blocksDelivery.mapIt(it.address)) 
if payment =? engine.wallet.pay(peer, price): - trace "Sending payment for blocks", price + trace "Sending payment for blocks", price, len = blocksDelivery.len await sendPayment(peer.id, payment) -proc blocksHandler*( +proc validateBlockDelivery( + b: BlockExcEngine, + bd: BlockDelivery): ?!void = + if bd.address notin b.pendingBlocks: + return failure("Received block is not currently a pending block") + + if bd.address.leaf: + without proof =? bd.proof: + return failure("Missing proof") + + if proof.index != bd.address.index: + return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index) + + without leaf =? bd.blk.cid.mhash.mapFailure, err: + return failure("Unable to get mhash from cid for block, nested err: " & err.msg) + + without treeRoot =? bd.address.treeCid.mhash.mapFailure, err: + return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg) + + if err =? proof.verify(leaf, treeRoot).errorOption: + return failure("Unable to verify proof for block, nested err: " & err.msg) + + else: # not leaf + if bd.address.cid != bd.blk.cid: + return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid) + + return success() + +proc blocksDeliveryHandler*( b: BlockExcEngine, peer: PeerId, - blocks: seq[bt.Block]) {.async.} = - ## handle incoming blocks - ## + blocksDelivery: seq[BlockDelivery]) {.async.} = + trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt($it.address)).join(",") - trace "Got blocks from peer", peer, len = blocks.len - for blk in blocks: - if isErr (await b.localStore.putBlock(blk)): - trace "Unable to store block", cid = blk.cid + var validatedBlocksDelivery: seq[BlockDelivery] + for bd in blocksDelivery: + logScope: + peer = peer + address = bd.address + + if err =? b.validateBlockDelivery(bd).errorOption: + warn "Block validation failed", msg = err.msg + continue + + if err =? 
(await b.localStore.putBlock(bd.blk)).errorOption: + error "Unable to store block", err = err.msg + continue + + if bd.address.leaf: + without proof =? bd.proof: + error "Proof expected for a leaf block delivery" + continue + if err =? (await b.localStore.putCidAndProof( + bd.address.treeCid, + bd.address.index, + bd.blk.cid, + proof)).errorOption: + + error "Unable to store proof and cid for a block" + continue + + validatedBlocksDelivery.add(bd) + + await b.resolveBlocks(validatedBlocksDelivery) + codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) - await b.resolveBlocks(blocks) let peerCtx = b.peers.get(peer) if peerCtx != nil: - # we don't care about this blocks anymore, lets cleanup the list - await b.payForBlocks(peerCtx, blocks) - peerCtx.cleanPresence(blocks.mapIt( it.cid )) + await b.payForBlocks(peerCtx, blocksDelivery) + ## shouldn't we remove them from the want-list instead of this: + peerCtx.cleanPresence(blocksDelivery.mapIt( it.address )) proc wantListHandler*( b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} = - ## Handle incoming want lists - ## - - trace "Got want list for peer", peer, items = wantList.entries.len - let peerCtx = b.peers.get(peer) + let + peerCtx = b.peers.get(peer) if isNil(peerCtx): return var - precense: seq[BlockPresence] + presence: seq[BlockPresence] for e in wantList.entries: let - idx = peerCtx.peerWants.find(e) + idx = peerCtx.peerWants.findIt(it.address == e.address) logScope: peer = peerCtx.id - cid = e.cid + address = e.address wantType = $e.wantType if idx < 0: # updating entry - trace "Processing new want list entry", cid = e.cid - let - have = await e.cid in b.localStore + have = await e.address in b.localStore price = @( b.pricing.get(Pricing(price: 0.u256)) .price.toBytesBE) + if e.wantType == WantType.WantHave: + codex_block_exchange_want_have_lists_received.inc() + if not have and e.sendDontHave: - trace "Adding dont have entry to precense response", cid = e.cid - 
precense.add( + presence.add( BlockPresence( - cid: e.cid.data.buffer, + address: e.address, `type`: BlockPresenceType.DontHave, price: price)) elif have and e.wantType == WantType.WantHave: - trace "Adding have entry to precense response", cid = e.cid - precense.add( + presence.add( BlockPresence( - cid: e.cid.data.buffer, + address: e.address, `type`: BlockPresenceType.Have, price: price)) elif e.wantType == WantType.WantBlock: - trace "Added entry to peer's want blocks list", cid = e.cid peerCtx.peerWants.add(e) + codex_block_exchange_want_block_lists_received.inc() else: # peer doesn't want this block anymore if e.cancel: - trace "Removing entry from peer want list" peerCtx.peerWants.del(idx) else: - trace "Updating entry in peer want list" # peer might want to ask for the same cid with # different want params peerCtx.peerWants[idx] = e # update entry - if precense.len > 0: - trace "Sending precense to remote", items = precense.len - await b.network.request.sendPresence(peer, precense) + if presence.len > 0: + trace "Sending presence to remote", items = presence.mapIt($it).join(",") + await b.network.request.sendPresence(peer, presence) if not b.scheduleTask(peerCtx): - trace "Unable to schedule task for peer", peer + warn "Unable to schedule task for peer", peer proc accountHandler*( engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} = - let context = engine.peers.get(peer) + let + context = engine.peers.get(peer) if context.isNil: return @@ -403,7 +481,8 @@ proc paymentHandler*( return if channel =? 
context.paymentChannel: - let sender = account.address + let + sender = account.address discard engine.wallet.acceptPayment(channel, Asset, sender, payment) else: context.paymentChannel = engine.wallet.acceptChannel(payment).option @@ -413,6 +492,8 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} = ## list exchange ## + trace "Setting up peer", peer + if peer notin b.peers: trace "Setting up new peer", peer b.peers.add(BlockExcPeerCtx( @@ -421,9 +502,11 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} = trace "Added peer", peers = b.peers.len # broadcast our want list, the other peer will do the same - if b.pendingBlocks.len > 0: + if b.pendingBlocks.wantListLen > 0: + trace "Sending our want list to a peer", peer + let cids = toSeq(b.pendingBlocks.wantList) await b.network.request.sendWantList( - peer, toSeq(b.pendingBlocks.wantList), full = true) + peer, cids, full = true) if address =? b.pricing.?address: await b.network.request.sendAccount(peer, Account(address: address)) @@ -438,8 +521,6 @@ proc dropPeer*(b: BlockExcEngine, peer: PeerId) = b.peers.remove(peer) proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = - trace "Handling task for peer", peer = task.id - # Send to the peer blocks he wants to get, # if they present in our local store @@ -448,38 +529,53 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = var wantsBlocks = task.peerWants.filterIt( - it.wantType == WantType.WantBlock + it.wantType == WantType.WantBlock and not it.inFlight ) - if wantsBlocks.len > 0: - trace "Got peer want blocks list", items = wantsBlocks.len + proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) = + for peerWant in task.peerWants.mitems: + if peerWant.address in addresses: + peerWant.inFlight = inFlight + if wantsBlocks.len > 0: + # Mark wants as in-flight. 
+ let wantAddresses = wantsBlocks.mapIt(it.address) + updateInFlight(wantAddresses, true) wantsBlocks.sort(SortOrder.Descending) - let - blockFuts = await allFinished(wantsBlocks.mapIt( - b.localStore.getBlock(it.cid) - )) + proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} = + if e.address.leaf: + (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( + (blkAndProof: (Block, CodexProof)) => + BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some) + ) + else: + (await b.localStore.getBlock(e.address)).map( + (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) + ) - # Extract successfully received blocks let - blocks = blockFuts + blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) + blocksDelivery = blocksDeliveryFut .filterIt(it.completed and it.read.isOk) .mapIt(it.read.get) - if blocks.len > 0: - trace "Sending blocks to peer", peer = task.id, blocks = blocks.len - await b.network.request.sendBlocks( - task.id, - blocks) + # All the wants that failed local lookup must be set to not-in-flight again. 
+ let + successAddresses = blocksDelivery.mapIt(it.address) + failedAddresses = wantAddresses.filterIt(it notin successAddresses) + updateInFlight(failedAddresses, false) - trace "About to remove entries from peerWants", blocks = blocks.len, items = task.peerWants.len - # Remove successfully sent blocks - task.peerWants.keepIf( - proc(e: Entry): bool = - not blocks.anyIt( it.cid == e.cid ) + if blocksDelivery.len > 0: + trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt($it.address)).join(",") + await b.network.request.sendBlocksDelivery( + task.id, + blocksDelivery ) - trace "Removed entries from peerWants", items = task.peerWants.len + + codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64) + + task.peerWants.keepItIf(it.address notin successAddresses) proc blockexcTaskRunner(b: BlockExcEngine) {.async.} = ## process tasks @@ -490,21 +586,24 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async.} = let peerCtx = await b.taskQueue.pop() - trace "Got new task from queue", peerId = peerCtx.id await b.taskHandler(peerCtx) - trace "Exiting blockexc task runner" + info "Exiting blockexc task runner" proc new*( - T: type BlockExcEngine, - localStore: BlockStore, - wallet: WalletRef, - network: BlockExcNetwork, - discovery: DiscoveryEngine, - peerStore: PeerCtxStore, - pendingBlocks: PendingBlocksManager, - concurrentTasks = DefaultConcurrentTasks, - peersPerRequest = DefaultMaxPeersPerRequest): T = + T: type BlockExcEngine, + localStore: BlockStore, + wallet: WalletRef, + network: BlockExcNetwork, + discovery: DiscoveryEngine, + peerStore: PeerCtxStore, + pendingBlocks: PendingBlocksManager, + concurrentTasks = DefaultConcurrentTasks, + peersPerRequest = DefaultMaxPeersPerRequest, + blockFetchTimeout = DefaultBlockTimeout, +): BlockExcEngine = + ## Create new block exchange engine instance + ## let engine = BlockExcEngine( @@ -516,7 +615,8 @@ proc new*( wallet: wallet, concurrentTasks: concurrentTasks, taskQueue: 
newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), - discovery: discovery) + discovery: discovery, + blockFetchTimeout: blockFetchTimeout) proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = if event.kind == PeerEventKind.Joined: @@ -538,10 +638,10 @@ proc new*( presence: seq[BlockPresence]): Future[void] {.gcsafe.} = engine.blockPresenceHandler(peer, presence) - proc blocksHandler( + proc blocksDeliveryHandler( peer: PeerId, - blocks: seq[bt.Block]): Future[void] {.gcsafe.} = - engine.blocksHandler(peer, blocks) + blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} = + engine.blocksDeliveryHandler(peer, blocksDelivery) proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = engine.accountHandler(peer, account) @@ -551,7 +651,7 @@ proc new*( network.handlers = BlockExcHandlers( onWantList: blockWantListHandler, - onBlocks: blocksHandler, + onBlocksDelivery: blocksDeliveryHandler, onPresence: blockPresenceHandler, onAccount: accountHandler, onPayment: paymentHandler) diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index dd24b1d5..9c5efc0b 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -8,21 +8,26 @@ ## those terms. 
import std/tables +import std/monotimes import pkg/upraises push: {.upraises: [].} -import pkg/questionable -import pkg/chronicles import pkg/chronos import pkg/libp2p +import pkg/metrics +import ../protobuf/blockexc import ../../blocktype +import ../../logutils logScope: topics = "codex pendingblocks" +declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests") +declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us") + const DefaultBlockTimeout* = 10.minutes @@ -30,83 +35,123 @@ type BlockReq* = object handle*: Future[Block] inFlight*: bool + startTime*: int64 PendingBlocksManager* = ref object of RootObj - blocks*: Table[Cid, BlockReq] # pending Block requests + blocks*: Table[BlockAddress, BlockReq] # pending Block requests + +proc updatePendingBlockGauge(p: PendingBlocksManager) = + codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) proc getWantHandle*( p: PendingBlocksManager, - cid: Cid, + address: BlockAddress, timeout = DefaultBlockTimeout, inFlight = false): Future[Block] {.async.} = ## Add an event for a block ## try: - if cid notin p.blocks: - p.blocks[cid] = BlockReq( + if address notin p.blocks: + p.blocks[address] = BlockReq( handle: newFuture[Block]("pendingBlocks.getWantHandle"), - inFlight: inFlight) + inFlight: inFlight, + startTime: getMonoTime().ticks) - trace "Adding pending future for block", cid, inFlight = p.blocks[cid].inFlight - - return await p.blocks[cid].handle.wait(timeout) + p.updatePendingBlockGauge() + return await p.blocks[address].handle.wait(timeout) except CancelledError as exc: - trace "Blocks cancelled", exc = exc.msg, cid + trace "Blocks cancelled", exc = exc.msg, address raise exc except CatchableError as exc: - trace "Pending WANT failed or expired", exc = exc.msg + error "Pending WANT failed or expired", exc = exc.msg # no need to cancel, it is already cancelled by wait() raise exc finally: - p.blocks.del(cid) + 
p.blocks.del(address) + p.updatePendingBlockGauge() + +proc getWantHandle*( + p: PendingBlocksManager, + cid: Cid, + timeout = DefaultBlockTimeout, + inFlight = false): Future[Block] = + p.getWantHandle(BlockAddress.init(cid), timeout, inFlight) proc resolve*( p: PendingBlocksManager, - blocks: seq[Block]) = + blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} = ## Resolve pending blocks ## - for blk in blocks: - # resolve any pending blocks - p.blocks.withValue(blk.cid, pending): - if not pending[].handle.completed: - trace "Resolving block", cid = blk.cid - pending[].handle.complete(blk) + for bd in blocksDelivery: + p.blocks.withValue(bd.address, blockReq): + if not blockReq.handle.finished: + let + startTime = blockReq.startTime + stopTime = getMonoTime().ticks + retrievalDurationUs = (stopTime - startTime) div 1000 + + blockReq.handle.complete(bd.blk) + + codex_block_exchange_retrieval_time_us.set(retrievalDurationUs) + + if retrievalDurationUs > 500000: + warn "High block retrieval time", retrievalDurationUs, address = bd.address + else: + trace "Block handle already finished", address = bd.address proc setInFlight*( p: PendingBlocksManager, - cid: Cid, + address: BlockAddress, inFlight = true) = - p.blocks.withValue(cid, pending): + ## Set inflight status for a block + ## + + p.blocks.withValue(address, pending): pending[].inFlight = inFlight - trace "Setting inflight", cid, inFlight = pending[].inFlight proc isInFlight*( p: PendingBlocksManager, - cid: Cid): bool = - p.blocks.withValue(cid, pending): + address: BlockAddress): bool = + ## Check if a block is in flight + ## + + p.blocks.withValue(address, pending): result = pending[].inFlight - trace "Getting inflight", cid, inFlight = result -proc pending*( - p: PendingBlocksManager, - cid: Cid): bool = cid in p.blocks +proc contains*(p: PendingBlocksManager, cid: Cid): bool = + BlockAddress.init(cid) in p.blocks -proc contains*( - p: PendingBlocksManager, - cid: Cid): bool = p.pending(cid) +proc 
contains*(p: PendingBlocksManager, address: BlockAddress): bool = + address in p.blocks -iterator wantList*(p: PendingBlocksManager): Cid = - for k in p.blocks.keys: - yield k +iterator wantList*(p: PendingBlocksManager): BlockAddress = + for a in p.blocks.keys: + yield a + +iterator wantListBlockCids*(p: PendingBlocksManager): Cid = + for a in p.blocks.keys: + if not a.leaf: + yield a.cid + +iterator wantListCids*(p: PendingBlocksManager): Cid = + var yieldedCids = initHashSet[Cid]() + for a in p.blocks.keys: + let cid = a.cidOrTreeCid + if cid notin yieldedCids: + yieldedCids.incl(cid) + yield cid iterator wantHandles*(p: PendingBlocksManager): Future[Block] = for v in p.blocks.values: yield v.handle +proc wantListLen*(p: PendingBlocksManager): int = + p.blocks.len + func len*(p: PendingBlocksManager): int = p.blocks.len -func new*(T: type PendingBlocksManager): T = - T() +func new*(T: type PendingBlocksManager): PendingBlocksManager = + PendingBlocksManager() diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index ee835fd5..448b8c4f 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -10,7 +10,6 @@ import std/tables import std/sequtils -import pkg/chronicles import pkg/chronos import pkg/libp2p @@ -19,6 +18,7 @@ import pkg/questionable import pkg/questionable/results import ../../blocktype as bt +import ../../logutils import ../protobuf/blockexc as pb import ../protobuf/payments @@ -34,47 +34,61 @@ const MaxInflight* = 100 type - WantListHandler* = proc(peer: PeerID, wantList: WantList): Future[void] {.gcsafe.} - BlocksHandler* = proc(peer: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.} - BlockPresenceHandler* = proc(peer: PeerID, precense: seq[BlockPresence]): Future[void] {.gcsafe.} - AccountHandler* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.} - PaymentHandler* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.} + 
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} + BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} + BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} + AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} + PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} + + BlockExcHandlers* = object + onWantList*: WantListHandler + onBlocksDelivery*: BlocksDeliveryHandler + onPresence*: BlockPresenceHandler + onAccount*: AccountHandler + onPayment*: PaymentHandler + WantListSender* = proc( - id: PeerID, - cids: seq[Cid], + id: PeerId, + addresses: seq[BlockAddress], priority: int32 = 0, cancel: bool = false, wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false): Future[void] {.gcsafe.} - - BlockExcHandlers* = object - onWantList*: WantListHandler - onBlocks*: BlocksHandler - onPresence*: BlockPresenceHandler - onAccount*: AccountHandler - onPayment*: PaymentHandler - - BlocksSender* = proc(peer: PeerID, presence: seq[bt.Block]): Future[void] {.gcsafe.} - PresenceSender* = proc(peer: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.} - AccountSender* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.} - PaymentSender* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.} + WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} + BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} + PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} + AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} + PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} BlockExcRequest* = object sendWantList*: WantListSender - sendBlocks*: BlocksSender + 
sendWantCancellations*: WantCancellationSender + sendBlocksDelivery*: BlocksDeliverySender sendPresence*: PresenceSender sendAccount*: AccountSender sendPayment*: PaymentSender BlockExcNetwork* = ref object of LPProtocol - peers*: Table[PeerID, NetworkPeer] + peers*: Table[PeerId, NetworkPeer] switch*: Switch handlers*: BlockExcHandlers request*: BlockExcRequest getConn: ConnProvider inflightSema: AsyncSemaphore +proc peerId*(b: BlockExcNetwork): PeerId = + ## Return peer id + ## + + return b.switch.peerInfo.peerId + +proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool = + ## Check if peer is self + ## + + return b.peerId == peer + proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = ## Send message to peer ## @@ -82,8 +96,11 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = b.peers.withValue(id, peer): try: await b.inflightSema.acquire() - trace "Sending message to peer", peer = id await peer[].send(msg) + except CancelledError as error: + raise error + except CatchableError as err: + error "Error sending message", peer = id, msg = err.msg finally: b.inflightSema.release() do: @@ -97,31 +114,12 @@ proc handleWantList( ## if not b.handlers.onWantList.isNil: - trace "Handling want list for peer", peer = peer.id, items = list.entries.len await b.handlers.onWantList(peer.id, list) -# TODO: make into a template -proc makeWantList*( - cids: seq[Cid], - priority: int = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false): WantList = - WantList( - entries: cids.mapIt( - Entry( - `block`: it.data.buffer, - priority: priority.int32, - cancel: cancel, - wantType: wantType, - sendDontHave: sendDontHave) ), - full: full) - proc sendWantList*( b: BlockExcNetwork, - id: PeerID, - cids: seq[Cid], + id: PeerId, + addresses: seq[BlockAddress], priority: int32 = 0, cancel: bool = false, wantType: WantType = WantType.WantHave, @@ -130,57 +128,45 @@ proc sendWantList*( ## 
Send a want message to peer ## - trace "Sending want list to peer", peer = id, `type` = $wantType, items = cids.len - let msg = makeWantList( - cids, - priority, - cancel, - wantType, - full, - sendDontHave) + let msg = WantList( + entries: addresses.mapIt( + WantListEntry( + address: it, + priority: priority, + cancel: cancel, + wantType: wantType, + sendDontHave: sendDontHave) ), + full: full) b.send(id, Message(wantlist: msg)) -proc handleBlocks( +proc sendWantCancellations*( + b: BlockExcNetwork, + id: PeerId, + addresses: seq[BlockAddress]): Future[void] {.async.} = + ## Informs a remote peer that we're no longer interested in a set of blocks + ## + await b.sendWantList(id = id, addresses = addresses, cancel = true) + +proc handleBlocksDelivery( b: BlockExcNetwork, peer: NetworkPeer, - blocks: seq[pb.Block]) {.async.} = + blocksDelivery: seq[BlockDelivery]) {.async.} = ## Handle incoming blocks ## - if not b.handlers.onBlocks.isNil: - trace "Handling blocks for peer", peer = peer.id, items = blocks.len + if not b.handlers.onBlocksDelivery.isNil: + await b.handlers.onBlocksDelivery(peer.id, blocksDelivery) - var blks: seq[bt.Block] - for blob in blocks: - without cid =? Cid.init(blob.prefix): - trace "Unable to initialize Cid from protobuf message" - without blk =? 
bt.Block.new(cid, blob.data, verify = true): - trace "Unable to initialize Block from data" - - blks.add(blk) - - await b.handlers.onBlocks(peer.id, blks) - -template makeBlocks*(blocks: seq[bt.Block]): seq[pb.Block] = - var blks: seq[pb.Block] - for blk in blocks: - blks.add(pb.Block( - prefix: blk.cid.data.buffer, - data: blk.data - )) - - blks - -proc sendBlocks*( +proc sendBlocksDelivery*( b: BlockExcNetwork, - id: PeerID, - blocks: seq[bt.Block]): Future[void] = + id: PeerId, + blocksDelivery: seq[BlockDelivery]): Future[void] = ## Send blocks to remote ## - b.send(id, pb.Message(payload: makeBlocks(blocks))) + b.send(id, pb.Message(payload: blocksDelivery)) proc handleBlockPresence( b: BlockExcNetwork, @@ -190,12 +176,11 @@ proc handleBlockPresence( ## if not b.handlers.onPresence.isNil: - trace "Handling block presence for peer", peer = peer.id, items = presence.len await b.handlers.onPresence(peer.id, presence) proc sendBlockPresence*( b: BlockExcNetwork, - id: PeerID, + id: PeerId, presence: seq[BlockPresence]): Future[void] = ## Send presence to remote ## @@ -240,43 +225,46 @@ proc handlePayment( if not network.handlers.onPayment.isNil: await network.handlers.onPayment(peer.id, payment) -proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.async.} = - try: - if msg.wantlist.entries.len > 0: - asyncSpawn b.handleWantList(peer, msg.wantlist) +proc rpcHandler( + b: BlockExcNetwork, + peer: NetworkPeer, + msg: Message) {.raises: [].} = + ## handle rpc messages + ## + if msg.wantList.entries.len > 0: + asyncSpawn b.handleWantList(peer, msg.wantList) - if msg.payload.len > 0: - asyncSpawn b.handleBlocks(peer, msg.payload) + if msg.payload.len > 0: + asyncSpawn b.handleBlocksDelivery(peer, msg.payload) - if msg.blockPresences.len > 0: - asyncSpawn b.handleBlockPresence(peer, msg.blockPresences) + if msg.blockPresences.len > 0: + asyncSpawn b.handleBlockPresence(peer, msg.blockPresences) - if account =? 
Account.init(msg.account): - asyncSpawn b.handleAccount(peer, account) + if account =? Account.init(msg.account): + asyncSpawn b.handleAccount(peer, account) - if payment =? SignedState.init(msg.payment): - asyncSpawn b.handlePayment(peer, payment) + if payment =? SignedState.init(msg.payment): + asyncSpawn b.handlePayment(peer, payment) - except CatchableError as exc: - trace "Exception in blockexc rpc handler", exc = exc.msg - -proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer = +proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = ## Creates or retrieves a BlockExcNetwork Peer ## if peer in b.peers: return b.peers.getOrDefault(peer, nil) - var getConn = proc(): Future[Connection] {.async.} = + var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} = try: return await b.switch.dial(peer, Codec) + except CancelledError as error: + raise error except CatchableError as exc: trace "Unable to connect to blockexc peer", exc = exc.msg if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc (p: NetworkPeer, msg: Message): Future[void] = + let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} = b.rpcHandler(p, msg) # create new pubsub peer @@ -287,7 +275,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer = return blockExcPeer -proc setupPeer*(b: BlockExcNetwork, peer: PeerID) = +proc setupPeer*(b: BlockExcNetwork, peer: PeerId) = ## Perform initial setup, such as want ## list exchange ## @@ -295,9 +283,16 @@ proc setupPeer*(b: BlockExcNetwork, peer: PeerID) = discard b.getOrCreatePeer(peer) proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} = + ## Dial a peer + ## + + if b.isSelf(peer.peerId): + trace "Skipping dialing self", peer = peer.peerId + return + await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) -proc dropPeer*(b: BlockExcNetwork, peer: PeerID) = +proc dropPeer*(b: BlockExcNetwork, peer: PeerId) = ## Cleanup disconnected peer 
## @@ -307,7 +302,7 @@ method init*(b: BlockExcNetwork) = ## Perform protocol initialization ## - proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} = + proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = if event.kind == PeerEventKind.Joined: b.setupPeer(peerId) else: @@ -328,7 +323,7 @@ proc new*( T: type BlockExcNetwork, switch: Switch, connProvider: ConnProvider = nil, - maxInflight = MaxInflight): T = + maxInflight = MaxInflight): BlockExcNetwork = ## Create a new BlockExcNetwork instance ## @@ -339,8 +334,8 @@ proc new*( inflightSema: newAsyncSemaphore(maxInflight)) proc sendWantList( - id: PeerID, - cids: seq[Cid], + id: PeerId, + cids: seq[BlockAddress], priority: int32 = 0, cancel: bool = false, wantType: WantType = WantType.WantHave, @@ -350,21 +345,25 @@ proc new*( id, cids, priority, cancel, wantType, full, sendDontHave) - proc sendBlocks(id: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.} = - self.sendBlocks(id, blocks) + proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} = + self.sendWantCancellations(id, addresses) - proc sendPresence(id: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = + proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} = + self.sendBlocksDelivery(id, blocksDelivery) + + proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = self.sendBlockPresence(id, presence) - proc sendAccount(id: PeerID, account: Account): Future[void] {.gcsafe.} = + proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} = self.sendAccount(id, account) - proc sendPayment(id: PeerID, payment: SignedState): Future[void] {.gcsafe.} = + proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} = self.sendPayment(id, payment) self.request = BlockExcRequest( sendWantList: sendWantList, - sendBlocks: sendBlocks, + sendWantCancellations: sendWantCancellations, + 
sendBlocksDelivery: sendBlocksDelivery, sendPresence: sendPresence, sendAccount: sendAccount, sendPayment: sendPayment) diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index cef58bc5..133d8c7c 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -11,18 +11,16 @@ import pkg/upraises push: {.upraises: [].} import pkg/chronos -import pkg/chronicles import pkg/libp2p import ../protobuf/blockexc +import ../protobuf/message import ../../errors +import ../../logutils logScope: topics = "codex blockexcnetworkpeer" -const - MaxMessageSize = 100 * 1 shl 20 # manifest files can be big - type ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.} @@ -45,12 +43,13 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = try: while not conn.atEof or not conn.closed: let - data = await conn.readLp(MaxMessageSize) - msg = Message.ProtobufDecode(data).mapFailure().tryGet() - trace "Got message for peer", peer = b.id + data = await conn.readLp(MaxMessageSize.int) + msg = Message.protobufDecode(data).mapFailure().tryGet() await b.handler(b, msg) - except CatchableError as exc: - trace "Exception in blockexc read loop", exc = exc.msg + except CancelledError: + trace "Read loop cancelled" + except CatchableError as err: + warn "Exception in blockexc read loop", msg = err.msg finally: await conn.close() @@ -66,18 +65,17 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} = let conn = await b.connect() if isNil(conn): - trace "Unable to get send connection for peer message not sent", peer = b.id + warn "Unable to get send connection for peer message not sent", peer = b.id return - trace "Sending message to remote", peer = b.id - await conn.writeLp(ProtobufEncode(msg)) + await conn.writeLp(protobufEncode(msg)) proc broadcast*(b: NetworkPeer, msg: Message) = proc sendAwaiter() {.async.} = try: await b.send(msg) except CatchableError as exc: - trace "Exception 
broadcasting message to peer", peer = b.id, exc = exc.msg + warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg asyncSpawn sendAwaiter() @@ -85,7 +83,7 @@ func new*( T: type NetworkPeer, peer: PeerId, connProvider: ConnProvider, - rpcHandler: RPCHandler): T = + rpcHandler: RPCHandler): NetworkPeer = doAssert(not isNil(connProvider), "should supply connection provider") diff --git a/codex/blockexchange/peers/peercontext.nim b/codex/blockexchange/peers/peercontext.nim index b9504c90..727676de 100644 --- a/codex/blockexchange/peers/peercontext.nim +++ b/codex/blockexchange/peers/peercontext.nim @@ -9,8 +9,8 @@ import std/sequtils import std/tables +import std/sets -import pkg/chronicles import pkg/libp2p import pkg/chronos import pkg/nitro @@ -20,42 +20,47 @@ import ../protobuf/blockexc import ../protobuf/payments import ../protobuf/presence -export payments, nitro +import ../../blocktype +import ../../logutils -logScope: - topics = "codex peercontext" +export payments, nitro type BlockExcPeerCtx* = ref object of RootObj - id*: PeerID - blocks*: Table[Cid, Presence] # remote peer have list including price - peerWants*: seq[Entry] # remote peers want lists - exchanged*: int # times peer has exchanged with us - lastExchange*: Moment # last time peer has exchanged with us - account*: ?Account # ethereum account of this peer - paymentChannel*: ?ChannelId # payment channel id + id*: PeerId + blocks*: Table[BlockAddress, Presence] # remote peer have list including price + peerWants*: seq[WantListEntry] # remote peers want lists + exchanged*: int # times peer has exchanged with us + lastExchange*: Moment # last time peer has exchanged with us + account*: ?Account # ethereum account of this peer + paymentChannel*: ?ChannelId # payment channel id -proc peerHave*(self: BlockExcPeerCtx): seq[Cid] = +proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] = toSeq(self.blocks.keys) -proc contains*(self: BlockExcPeerCtx, cid: Cid): bool = - cid in self.blocks 
+proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] = + self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet + +proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] = + self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet + +proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool = + address in self.blocks func setPresence*(self: BlockExcPeerCtx, presence: Presence) = - self.blocks[presence.cid] = presence + self.blocks[presence.address] = presence -func cleanPresence*(self: BlockExcPeerCtx, cids: seq[Cid]) = - for cid in cids: - self.blocks.del(cid) +func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) = + for a in addresses: + self.blocks.del(a) -func cleanPresence*(self: BlockExcPeerCtx, cid: Cid) = - self.cleanPresence(@[cid]) +func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) = + self.cleanPresence(@[address]) -func price*(self: BlockExcPeerCtx, cids: seq[Cid]): UInt256 = +func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 = var price = 0.u256 - for cid in cids: - self.blocks.withValue(cid, precense): + for a in addresses: + self.blocks.withValue(a, precense): price += precense[].price - trace "Blocks price", price price diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 3d17fdbe..a64ecd22 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -16,10 +16,12 @@ import pkg/upraises push: {.upraises: [].} import pkg/chronos -import pkg/chronicles import pkg/libp2p import ../protobuf/blockexc +import ../../blocktype +import ../../logutils + import ./peercontext export peercontext @@ -29,56 +31,59 @@ logScope: type PeerCtxStore* = ref object of RootObj - peers*: OrderedTable[PeerID, BlockExcPeerCtx] + peers*: OrderedTable[PeerId, BlockExcPeerCtx] iterator items*(self: PeerCtxStore): BlockExcPeerCtx = for p in self.peers.values: yield p -proc contains*(a: 
openArray[BlockExcPeerCtx], b: PeerID): bool = +proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool = ## Convenience method to check for peer precense ## a.anyIt( it.id == b ) -func contains*(self: PeerCtxStore, peerId: PeerID): bool = +func contains*(self: PeerCtxStore, peerId: PeerId): bool = peerId in self.peers func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) = - trace "Adding peer to peer context store", peer = peer.id self.peers[peer.id] = peer -func remove*(self: PeerCtxStore, peerId: PeerID) = - trace "Removing peer from peer context store", peer = peerId +func remove*(self: PeerCtxStore, peerId: PeerId) = self.peers.del(peerId) -func get*(self: PeerCtxStore, peerId: PeerID): BlockExcPeerCtx = - trace "Retrieving peer from peer context store", peer = peerId +func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx = self.peers.getOrDefault(peerId, nil) func len*(self: PeerCtxStore): int = self.peers.len +func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = + toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) ) + func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == cid ) ) + toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) ) + +func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = + toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) ) func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.cid == cid ) ) + toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) ) -func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - var - peers = self.peersHave(cid) +func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = + # assume that the price for all leaves in a tree is the same + let 
rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid) + var peers = self.peersHave(rootAddress) - trace "Selecting cheapest peers", peers = peers.len func cmp(a, b: BlockExcPeerCtx): int = var priceA = 0.u256 priceB = 0.u256 - a.blocks.withValue(cid, precense): + a.blocks.withValue(rootAddress, precense): priceA = precense[].price - b.blocks.withValue(cid, precense): + b.blocks.withValue(rootAddress, precense): priceB = precense[].price if priceA == priceB: @@ -93,5 +98,5 @@ func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = return peers proc new*(T: type PeerCtxStore): PeerCtxStore = - T( - peers: initOrderedTable[PeerID, BlockExcPeerCtx]()) + ## create new instance of a peer context store + PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]()) diff --git a/codex/blockexchange/protobuf/blockexc.nim b/codex/blockexchange/protobuf/blockexc.nim index f52f8042..12049853 100644 --- a/codex/blockexchange/protobuf/blockexc.nim +++ b/codex/blockexchange/protobuf/blockexc.nim @@ -9,47 +9,45 @@ import std/hashes import std/sequtils -import pkg/libp2p +import pkg/stew/endians2 import message -export Message, ProtobufEncode, ProtobufDecode -export Wantlist, WantType, Entry -export Block, BlockPresenceType, BlockPresence +import ../../blocktype + +export Message, protobufEncode, protobufDecode +export Wantlist, WantType, WantListEntry +export BlockDelivery, BlockPresenceType, BlockPresence export AccountMessage, StateChannelUpdate -proc hash*(e: Entry): Hash = - hash(e.`block`) +proc hash*(a: BlockAddress): Hash = + if a.leaf: + let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE) + hash(data) + else: + hash(a.cid.data.buffer) -proc cid*(e: Entry): Cid = - ## Helper to convert raw bytes to Cid - ## +proc hash*(e: WantListEntry): Hash = + hash(e.address) - Cid.init(e.`block`).get() - -proc contains*(a: openArray[Entry], b: Cid): bool = +proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool = ## Convenience 
method to check for peer precense ## - a.filterIt( it.cid == b ).len > 0 + a.anyIt(it.address == b) -proc `==`*(a: Entry, cid: Cid): bool = - return a.cid == cid +proc `==`*(a: WantListEntry, b: BlockAddress): bool = + return a.address == b -proc `<`*(a, b: Entry): bool = +proc `<`*(a, b: WantListEntry): bool = a.priority < b.priority -proc cid*(e: BlockPresence): Cid = - ## Helper to convert raw bytes to Cid - ## - Cid.init(e.cid).get() +proc `==`*(a: BlockPresence, b: BlockAddress): bool = + return a.address == b -proc `==`*(a: BlockPresence, cid: Cid): bool = - return cid(a) == cid - -proc contains*(a: openArray[BlockPresence], b: Cid): bool = +proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool = ## Convenience method to check for peer precense ## - a.filterIt( cid(it) == b ).len > 0 + a.anyIt(it.address == b) diff --git a/codex/blockexchange/protobuf/message.nim b/codex/blockexchange/protobuf/message.nim index e3d44155..61488b40 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -2,36 +2,50 @@ # and Protobuf encoder/decoder for these messages. # # Eventually all this code should be auto-generated from message.proto. +import std/sugar import pkg/libp2p/protobuf/minprotobuf +import pkg/libp2p/cid +import pkg/questionable + +import ../../units + +import ../../merkletree +import ../../blocktype + +const + MaxBlockSize* = 100.MiBs.uint + MaxMessageSize* = 100.MiBs.uint type WantType* = enum WantBlock = 0, WantHave = 1 - Entry* = object - `block`*: seq[byte] # The block cid + WantListEntry* = object + address*: BlockAddress priority*: int32 # The priority (normalized). default to 1 cancel*: bool # Whether this revokes an entry wantType*: WantType # Note: defaults to enum 0, ie Block sendDontHave*: bool # Note: defaults to false + inFlight*: bool # Whether block sending is in progress. Not serialized. 
- Wantlist* = object - entries*: seq[Entry] # A list of wantlist entries - full*: bool # Whether this is the full wantlist. default to false + WantList* = object + entries*: seq[WantListEntry] # A list of wantList entries + full*: bool # Whether this is the full wantList. default to false - Block* = object - prefix*: seq[byte] # CID prefix (cid version, multicodec and multihash prefix (type + length) - data*: seq[byte] + BlockDelivery* = object + blk*: Block + address*: BlockAddress + proof*: ?CodexProof # Present only if `address.leaf` is true BlockPresenceType* = enum Have = 0, DontHave = 1 BlockPresence* = object - cid*: seq[byte] # The block cid + address*: BlockAddress `type`*: BlockPresenceType price*: seq[byte] # Amount of assets to pay for the block (UInt256) @@ -42,8 +56,8 @@ type update*: seq[byte] # Signed Nitro state, serialized as JSON Message* = object - wantlist*: Wantlist - payload*: seq[Block] + wantList*: WantList + payload*: seq[BlockDelivery] blockPresences*: seq[BlockPresence] pendingBytes*: uint account*: AccountMessage @@ -53,9 +67,20 @@ type # Encoding Message into seq[byte] in Protobuf format # -proc write*(pb: var ProtoBuffer, field: int, value: Entry) = +proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) = var ipb = initProtoBuffer() - ipb.write(1, value.`block`) + ipb.write(1, value.leaf.uint) + if value.leaf: + ipb.write(2, value.treeCid.data.buffer) + ipb.write(3, value.index.uint64) + else: + ipb.write(4, value.cid.data.buffer) + ipb.finish() + pb.write(field, ipb) + +proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) = + var ipb = initProtoBuffer() + ipb.write(1, value.address) ipb.write(2, value.priority.uint64) ipb.write(3, value.cancel.uint) ipb.write(4, value.wantType.uint) @@ -63,7 +88,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: Entry) = ipb.finish() pb.write(field, ipb) -proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) = +proc write*(pb: var ProtoBuffer, field: int, 
value: WantList) = var ipb = initProtoBuffer() for v in value.entries: ipb.write(1, v) @@ -71,16 +96,20 @@ proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) = ipb.finish() pb.write(field, ipb) -proc write*(pb: var ProtoBuffer, field: int, value: Block) = - var ipb = initProtoBuffer() - ipb.write(1, value.prefix) - ipb.write(2, value.data) +proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) = + var ipb = initProtoBuffer(maxSize = MaxBlockSize) + ipb.write(1, value.blk.cid.data.buffer) + ipb.write(2, value.blk.data) + ipb.write(3, value.address) + if value.address.leaf: + if proof =? value.proof: + ipb.write(4, proof.encode()) ipb.finish() pb.write(field, ipb) proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) = var ipb = initProtoBuffer() - ipb.write(1, value.cid) + ipb.write(1, value.address) ipb.write(2, value.`type`.uint) ipb.write(3, value.price) ipb.finish() @@ -98,9 +127,9 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) = ipb.finish() pb.write(field, ipb) -proc ProtobufEncode*(value: Message): seq[byte] = - var ipb = initProtoBuffer() - ipb.write(1, value.wantlist) +proc protobufEncode*(value: Message): seq[byte] = + var ipb = initProtoBuffer(maxSize = MaxMessageSize) + ipb.write(1, value.wantList) for v in value.payload: ipb.write(3, v) for v in value.blockPresences: @@ -115,12 +144,40 @@ proc ProtobufEncode*(value: Message): seq[byte] = # # Decoding Message from seq[byte] in Protobuf format # - -proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] = +proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] = var - value = Entry() + value: BlockAddress + leaf: bool field: uint64 - discard ? pb.getField(1, value.`block`) + cidBuf = newSeq[byte]() + + if ? pb.getField(1, field): + leaf = bool(field) + + if leaf: + var + treeCid: Cid + index: Natural + if ? pb.getField(2, cidBuf): + treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ? 
pb.getField(3, field): + index = field + value = BlockAddress(leaf: true, treeCid: treeCid, index: index) + else: + var cid: Cid + if ? pb.getField(4, cidBuf): + cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + value = BlockAddress(leaf: false, cid: cid) + + ok(value) + +proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] = + var + value = WantListEntry() + field: uint64 + ipb: ProtoBuffer + if ? pb.getField(1, ipb): + value.address = ? BlockAddress.decode(ipb) if ? pb.getField(2, field): value.priority = int32(field) if ? pb.getField(3, field): @@ -131,30 +188,52 @@ proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] = value.sendDontHave = bool(field) ok(value) -proc decode*(_: type Wantlist, pb: ProtoBuffer): ProtoResult[Wantlist] = +proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] = var - value = Wantlist() + value = WantList() field: uint64 sublist: seq[seq[byte]] if ? pb.getRepeatedField(1, sublist): for item in sublist: - value.entries.add(? Entry.decode(initProtoBuffer(item))) + value.entries.add(? WantListEntry.decode(initProtoBuffer(item))) if ? pb.getField(2, field): value.full = bool(field) ok(value) -proc decode*(_: type Block, pb: ProtoBuffer): ProtoResult[Block] = +proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] = var - value = Block() - discard ? pb.getField(1, value.prefix) - discard ? pb.getField(2, value.data) + value = BlockDelivery() + dataBuf = newSeq[byte]() + cidBuf = newSeq[byte]() + cid: Cid + ipb: ProtoBuffer + + if ? pb.getField(1, cidBuf): + cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ? pb.getField(2, dataBuf): + value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob) + if ? pb.getField(3, ipb): + value.address = ? BlockAddress.decode(ipb) + + if value.address.leaf: + var proofBuf = newSeq[byte]() + if ? pb.getField(4, proofBuf): + let proof = ? 
CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob) + value.proof = proof.some + else: + value.proof = CodexProof.none + else: + value.proof = CodexProof.none + ok(value) proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] = var value = BlockPresence() field: uint64 - discard ? pb.getField(1, value.cid) + ipb: ProtoBuffer + if ? pb.getField(1, ipb): + value.address = ? BlockAddress.decode(ipb) if ? pb.getField(2, field): value.`type` = BlockPresenceType(field) discard ? pb.getField(3, value.price) @@ -172,17 +251,17 @@ proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChan discard ? pb.getField(1, value.update) ok(value) -proc ProtobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = +proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = var value = Message() - pb = initProtoBuffer(msg) + pb = initProtoBuffer(msg, maxSize = MaxMessageSize) ipb: ProtoBuffer sublist: seq[seq[byte]] if ? pb.getField(1, ipb): - value.wantlist = ? Wantlist.decode(ipb) + value.wantList = ? WantList.decode(ipb) if ? pb.getRepeatedField(3, sublist): for item in sublist: - value.payload.add(? Block.decode(initProtoBuffer(item))) + value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))) if ? pb.getRepeatedField(4, sublist): for item in sublist: value.blockPresences.add(? 
BlockPresence.decode(initProtoBuffer(item))) diff --git a/codex/blockexchange/protobuf/presence.nim b/codex/blockexchange/protobuf/presence.nim index 1a1c6c5c..2b07191d 100644 --- a/codex/blockexchange/protobuf/presence.nim +++ b/codex/blockexchange/protobuf/presence.nim @@ -5,6 +5,8 @@ import pkg/questionable/results import pkg/upraises import ./blockexc +import ../../blocktype + export questionable export stint export BlockPresenceType @@ -14,7 +16,7 @@ upraises.push: {.upraises: [].} type PresenceMessage* = blockexc.BlockPresence Presence* = object - cid*: Cid + address*: BlockAddress have*: bool price*: UInt256 @@ -24,19 +26,18 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 = UInt256.fromBytesBE(bytes).some func init*(_: type Presence, message: PresenceMessage): ?Presence = - without cid =? Cid.init(message.cid) and - price =? UInt256.parse(message.price): + without price =? UInt256.parse(message.price): return none Presence some Presence( - cid: cid, + address: message.address, have: message.`type` == BlockPresenceType.Have, price: price ) func init*(_: type PresenceMessage, presence: Presence): PresenceMessage = PresenceMessage( - cid: presence.cid.data.buffer, + address: presence.address, `type`: if presence.have: BlockPresenceType.Have else: diff --git a/codex/blocktype.nim b/codex/blocktype.nim index 0911c22a..c44e4fd8 100644 --- a/codex/blocktype.nim +++ b/codex/blocktype.nim @@ -8,120 +8,78 @@ ## those terms. 
import std/tables +import std/sugar + export tables import pkg/upraises push: {.upraises: [].} -import pkg/libp2p +import pkg/libp2p/[cid, multicodec, multihash] import pkg/stew/byteutils import pkg/questionable import pkg/questionable/results -import pkg/chronicles -import ./formats +import ./units +import ./utils import ./errors +import ./logutils +import ./utils/json +import ./codextypes -export errors, formats - -const - # Size of blocks for storage / network exchange, - # should be divisible by 31 for PoR and by 64 for Leopard ECC - BlockSize* = 31 * 64 * 33 +export errors, logutils, units, codextypes type Block* = ref object of RootObj cid*: Cid data*: seq[byte] - BlockNotFoundError* = object of CodexError + BlockAddress* = object + case leaf*: bool + of true: + treeCid* {.serialize.}: Cid + index* {.serialize.}: Natural + else: + cid* {.serialize.}: Cid -template EmptyCid*: untyped = - var - emptyCid {.global, threadvar.}: - array[CIDv0..CIDv1, Table[MultiCodec, Cid]] +logutils.formatIt(LogFormat.textLines, BlockAddress): + if it.leaf: + "treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index + else: + "cid: " & shortLog($it.cid) - once: - emptyCid = [ - CIDv0: { - multiCodec("sha2-256"): Cid - .init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") - .get() - }.toTable, - CIDv1: { - multiCodec("sha2-256"): Cid - .init("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku") - .get() - }.toTable, - ] +logutils.formatIt(LogFormat.json, BlockAddress): %it - emptyCid +proc `==`*(a, b: BlockAddress): bool = + a.leaf == b.leaf and + ( + if a.leaf: + a.treeCid == b.treeCid and a.index == b.index + else: + a.cid == b.cid + ) -template EmptyDigests*: untyped = - var - emptyDigests {.global, threadvar.}: - array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]] +proc `$`*(a: BlockAddress): string = + if a.leaf: + "treeCid: " & $a.treeCid & ", index: " & $a.index + else: + "cid: " & $a.cid - once: - emptyDigests = [ - CIDv0: { - multiCodec("sha2-256"): 
EmptyCid[CIDv0] - .catch - .get()[multiCodec("sha2-256")] - .catch - .get() - .mhash - .get() - }.toTable, - CIDv1: { - multiCodec("sha2-256"): EmptyCid[CIDv1] - .catch - .get()[multiCodec("sha2-256")] - .catch - .get() - .mhash - .get() - }.toTable, - ] +proc cidOrTreeCid*(a: BlockAddress): Cid = + if a.leaf: + a.treeCid + else: + a.cid - emptyDigests +proc address*(b: Block): BlockAddress = + BlockAddress(leaf: false, cid: b.cid) -template EmptyBlock*: untyped = - var - emptyBlock {.global, threadvar.}: - array[CIDv0..CIDv1, Table[MultiCodec, Block]] +proc init*(_: type BlockAddress, cid: Cid): BlockAddress = + BlockAddress(leaf: false, cid: cid) - once: - emptyBlock = [ - CIDv0: { - multiCodec("sha2-256"): Block( - cid: EmptyCid[CIDv0][multiCodec("sha2-256")]) - }.toTable, - CIDv1: { - multiCodec("sha2-256"): Block( - cid: EmptyCid[CIDv1][multiCodec("sha2-256")]) - }.toTable, - ] - - emptyBlock - -proc isEmpty*(cid: Cid): bool = - cid == EmptyCid[cid.cidver] - .catch - .get()[cid.mhash.get().mcodec] - .catch - .get() - -proc isEmpty*(blk: Block): bool = - blk.cid.isEmpty - -proc emptyBlock*(cid: Cid): Block = - EmptyBlock[cid.cidver] - .catch - .get()[cid.mhash.get().mcodec] - .catch - .get() +proc init*(_: type BlockAddress, treeCid: Cid, index: Natural): BlockAddress = + BlockAddress(leaf: true, treeCid: treeCid, index: index) proc `$`*(b: Block): string = result &= "cid: " & $b.cid @@ -131,8 +89,10 @@ func new*( T: type Block, data: openArray[byte] = [], version = CIDv1, - mcodec = multiCodec("sha2-256"), - codec = multiCodec("raw")): ?!T = + mcodec = Sha256HashCodec, + codec = BlockCodec): ?!Block = + ## creates a new block for both storage and network IO + ## let hash = ? 
MultiHash.digest($mcodec, data).mapFailure @@ -144,21 +104,39 @@ func new*( cid: cid, data: @data).success -func new*( - T: type Block, - cid: Cid, - data: openArray[byte], - verify: bool = true): ?!T = +proc new*( + T: type Block, + cid: Cid, + data: openArray[byte], + verify: bool = true +): ?!Block = + ## creates a new block for both storage and network IO + ## - let - mhash = ? cid.mhash.mapFailure - b = ? Block.new( - data = @data, - version = cid.cidver, - codec = cid.mcodec, - mcodec = mhash.mcodec) + if verify: + let + mhash = ? cid.mhash.mapFailure + computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure + computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure + if computedCid != cid: + return "Cid doesn't match the data".failure - if verify and cid != b.cid: - return "Cid and content don't match!".failure + return Block( + cid: cid, + data: @data + ).success - success b +proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block = + emptyCid(version, hcodec, BlockCodec) + .flatMap((cid: Cid) => Block.new(cid = cid, data = @[])) + +proc emptyBlock*(cid: Cid): ?!Block = + cid.mhash.mapFailure.flatMap((mhash: MultiHash) => + emptyBlock(cid.cidver, mhash.mcodec)) + +proc isEmpty*(cid: Cid): bool = + success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) => + emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)) + +proc isEmpty*(blk: Block): bool = + blk.cid.isEmpty diff --git a/codex/chunker.nim b/codex/chunker.nim index 9d4d0fd2..a3ecc7c8 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -13,18 +13,18 @@ import pkg/upraises push: {.upraises: [].} -import pkg/chronicles import pkg/questionable import pkg/questionable/results import pkg/chronos import pkg/libp2p except shuffle import ./blocktype +import ./logutils export blocktype const - DefaultChunkSize* = BlockSize + DefaultChunkSize* = DefaultBlockSize type # default reader type @@ -35,7 +35,7 @@ type Chunker* = ref object reader*: Reader # Procedure 
called to actually read the data offset*: int # Bytes read so far (position in the stream) - chunkSize*: Natural # Size of each chunk + chunkSize*: NBytes # Size of each chunk pad*: bool # Pad last chunk to chunkSize? FileChunker* = Chunker @@ -46,7 +46,7 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} = ## the instantiated chunker ## - var buff = newSeq[byte](c.chunkSize) + var buff = newSeq[byte](c.chunkSize.int) let read = await c.reader(cast[ChunkBuffer](addr buff[0]), buff.len) if read <= 0: @@ -59,22 +59,26 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} = return move buff -func new*( - T: type Chunker, - reader: Reader, - chunkSize = DefaultChunkSize, - pad = true): T = - - T(reader: reader, +proc new*( + T: type Chunker, + reader: Reader, + chunkSize = DefaultChunkSize, + pad = true +): Chunker = + ## create a new Chunker instance + ## + Chunker( + reader: reader, offset: 0, chunkSize: chunkSize, pad: pad) proc new*( - T: type LPStreamChunker, - stream: LPStream, - chunkSize = DefaultChunkSize, - pad = true): T = + T: type LPStreamChunker, + stream: LPStream, + chunkSize = DefaultChunkSize, + pad = true +): LPStreamChunker = ## create the default File chunker ## @@ -86,22 +90,25 @@ proc new*( res += await stream.readOnce(addr data[res], len - res) except LPStreamEOFError as exc: trace "LPStreamChunker stream Eof", exc = exc.msg + except CancelledError as error: + raise error except CatchableError as exc: trace "CatchableError exception", exc = exc.msg raise newException(Defect, exc.msg) return res - T.new( + LPStreamChunker.new( reader = reader, chunkSize = chunkSize, pad = pad) proc new*( - T: type FileChunker, - file: File, - chunkSize = DefaultChunkSize, - pad = true): T = + T: type FileChunker, + file: File, + chunkSize = DefaultChunkSize, + pad = true +): FileChunker = ## create the default File chunker ## @@ -117,13 +124,15 @@ proc new*( total += res except IOError as exc: trace "Exception reading file", exc = exc.msg + except 
CancelledError as error: + raise error except CatchableError as exc: trace "CatchableError exception", exc = exc.msg raise newException(Defect, exc.msg) return total - T.new( + FileChunker.new( reader = reader, chunkSize = chunkSize, pad = pad) diff --git a/codex/clock.nim b/codex/clock.nim index ab40eeb4..f680ddec 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -1,16 +1,24 @@ import pkg/chronos +import pkg/stew/endians2 +import pkg/upraises +import pkg/stint type Clock* = ref object of RootObj SecondsSince1970* = int64 Timeout* = object of CatchableError -method now*(clock: Clock): SecondsSince1970 {.base.} = +method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} = raiseAssert "not implemented" -proc waitUntil*(clock: Clock, time: SecondsSince1970) {.async.} = - while clock.now() < time: - await sleepAsync(1.seconds) +method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} = + raiseAssert "not implemented" + +method start*(clock: Clock) {.base, async.} = + discard + +method stop*(clock: Clock) {.base, async.} = + discard proc withTimeout*(future: Future[void], clock: Clock, @@ -23,3 +31,14 @@ proc withTimeout*(future: Future[void], if not future.completed: await future.cancelAndWait() raise newException(Timeout, "Timed out") + +proc toBytes*(i: SecondsSince1970): seq[byte] = + let asUint = cast[uint64](i) + @(asUint.toBytes) + +proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 = + let asUint = uint64.fromBytes(bytes) + cast[int64](asUint) + +proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 = + bigint.truncate(int64) diff --git a/codex/codex.nim b/codex/codex.nim index a6273785..0b9182fb 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -8,10 +8,11 @@ ## those terms. 
import std/sequtils +import std/strutils import std/os -import std/sugar +import std/tables +import std/cpuinfo -import pkg/chronicles import pkg/chronos import pkg/presto import pkg/libp2p @@ -20,38 +21,140 @@ import pkg/confutils/defs import pkg/nitro import pkg/stew/io2 import pkg/stew/shims/net as stewnet +import pkg/datastore +import pkg/ethers except Rng +import pkg/stew/io2 +import pkg/taskpools import ./node import ./conf import ./rng import ./rest/api import ./stores +import ./slots import ./blockexchange import ./utils/fileutils import ./erasure import ./discovery import ./contracts -import ./utils/keyutils +import ./systemclock +import ./contracts/clock +import ./contracts/deployment import ./utils/addrutils +import ./namespaces +import ./codextypes +import ./logutils logScope: topics = "codex node" type CodexServer* = ref object - runHandle: Future[void] config: CodexConf restServer: RestServerRef codexNode: CodexNodeRef + repoStore: RepoStore + maintenance: BlockMaintainer + taskpool: Taskpool CodexPrivateKey* = libp2p.PrivateKey # alias + EthWallet = ethers.Wallet + +proc waitForSync(provider: Provider): Future[void] {.async.} = + var sleepTime = 1 + trace "Checking sync state of Ethereum provider..." + while await provider.isSyncing: + notice "Waiting for Ethereum provider to sync..." + await sleepAsync(sleepTime.seconds) + if sleepTime < 10: + inc sleepTime + trace "Ethereum provider is synced." + +proc bootstrapInteractions( + s: CodexServer): Future[void] {.async.} = + ## bootstrap interactions and return contracts + ## using clients, hosts, validators pairings + ## + let + config = s.config + repo = s.repoStore + + if config.persistence: + if not config.ethAccount.isSome and not config.ethPrivateKey.isSome: + error "Persistence enabled, but no Ethereum account was set" + quit QuitFailure + + let provider = JsonRpcProvider.new(config.ethProvider) + await waitForSync(provider) + var signer: Signer + if account =? 
config.ethAccount: + signer = provider.getSigner(account) + elif keyFile =? config.ethPrivateKey: + without isSecure =? checkSecureFile(keyFile): + error "Could not check file permissions: does Ethereum private key file exist?" + quit QuitFailure + if not isSecure: + error "Ethereum private key file does not have safe file permissions" + quit QuitFailure + without key =? keyFile.readAllChars(): + error "Unable to read Ethereum private key file" + quit QuitFailure + without wallet =? EthWallet.new(key.strip(), provider): + error "Invalid Ethereum private key in file" + quit QuitFailure + signer = wallet + + let deploy = Deployment.new(provider, config) + without marketplaceAddress =? await deploy.address(Marketplace): + error "No Marketplace address was specified or there is no known address for the current network" + quit QuitFailure + + let marketplace = Marketplace.new(marketplaceAddress, signer) + let market = OnChainMarket.new(marketplace) + let clock = OnChainClock.new(provider) + + var client: ?ClientInteractions + var host: ?HostInteractions + var validator: ?ValidatorInteractions + + if config.validator or config.persistence: + s.codexNode.clock = clock + else: + s.codexNode.clock = SystemClock() + + if config.persistence: + # This is used for simulation purposes. Normal nodes won't be compiled with this flag + # and hence the proof failure will always be 0. + when codex_enable_proof_failures: + let proofFailures = config.simulateProofFailures + if proofFailures > 0: + warn "Enabling proof failure simulation!" + else: + let proofFailures = 0 + if config.simulateProofFailures > 0: + warn "Proof failure simulation is not enabled for this build! 
Configuration ignored" + + let purchasing = Purchasing.new(market, clock) + let sales = Sales.new(market, clock, repo, proofFailures) + client = some ClientInteractions.new(clock, purchasing) + host = some HostInteractions.new(clock, sales) + + if config.validator: + let validation = Validation.new(clock, market, config.validatorMaxSlots) + validator = some ValidatorInteractions.new(clock, validation) + + s.codexNode.contracts = (client, host, validator) proc start*(s: CodexServer) {.async.} = - s.restServer.start() - await s.codexNode.start() + trace "Starting codex node", config = $s.config + + await s.repoStore.start() + s.maintenance.start() + + await s.codexNode.switch.start() let - # TODO: Can't define this as constants, pity + # TODO: Can't define these as constants, pity natIpPart = MultiAddress.init("/ip4/" & $s.config.nat & "/") .expect("Should create multiaddress") anyAddrIp = MultiAddress.init("/ip4/0.0.0.0/") @@ -75,32 +178,29 @@ proc start*(s: CodexServer) {.async.} = s.codexNode.discovery.updateAnnounceRecord(announceAddrs) s.codexNode.discovery.updateDhtRecord(s.config.nat, s.config.discoveryPort) - s.runHandle = newFuture[void]("codex.runHandle") - await s.runHandle + await s.bootstrapInteractions() + await s.codexNode.start() + s.restServer.start() proc stop*(s: CodexServer) {.async.} = + notice "Stopping codex node" + + + s.taskpool.syncAll() + s.taskpool.shutdown() + await allFuturesThrowing( - s.restServer.stop(), s.codexNode.stop()) - - s.runHandle.complete() - -proc new(_: type ContractInteractions, config: CodexConf): ?ContractInteractions = - if not config.persistence: - if config.ethAccount.isSome: - warn "Ethereum account was set, but persistence is not enabled" - return - - without account =? config.ethAccount: - error "Persistence enabled, but no Ethereum account was set" - quit QuitFailure - - if deployment =? 
config.ethDeployment: - ContractInteractions.new(config.ethProvider, account, deployment) - else: - ContractInteractions.new(config.ethProvider, account) - -proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey): T = + s.restServer.stop(), + s.codexNode.switch.stop(), + s.codexNode.stop(), + s.repoStore.stop(), + s.maintenance.stop()) +proc new*( + T: type CodexServer, + config: CodexConf, + privateKey: CodexPrivateKey): CodexServer = + ## create CodexServer including setting up datastore, repostore, etc let switch = SwitchBuilder .new() @@ -118,13 +218,22 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey): var cache: CacheStore = nil - if config.cacheSize > 0: - cache = CacheStore.new(cacheSize = config.cacheSize * MiB) + if config.cacheSize > 0'nb: + cache = CacheStore.new(cacheSize = config.cacheSize) + ## Is unused? let - discoveryStore = Datastore(SQLiteDatastore.new( - config.dataDir / "dht") - .expect("Should not fail!")) + discoveryDir = config.dataDir / CodexDhtNamespace + + if io2.createPath(discoveryDir).isErr: + trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir + raise (ref Defect)( + msg: "Unable to create discovery directory for block store: " & discoveryDir) + + let + discoveryStore = Datastore( + LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace) + .expect("Should create discovery datastore!")) discovery = Discovery.new( switch.peerInfo.privateKey, @@ -136,32 +245,85 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey): wallet = WalletRef.new(EthPrivateKey.random()) network = BlockExcNetwork.new(switch) - repoDir = config.dataDir / "repo" - if io2.createPath(repoDir).isErr: - trace "Unable to create data directory for block store", dataDir = repoDir - raise (ref Defect)( - msg: "Unable to create data directory for block store: " & repoDir) + repoData = case config.repoKind + of repoFS: 
Datastore(FSDatastore.new($config.dataDir, depth = 5) + .expect("Should create repo file data store!")) + of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir) + .expect("Should create repo SQLite data store!")) + of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir) + .expect("Should create repo LevelDB data store!")) + + repoStore = RepoStore.new( + repoDs = repoData, + metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace) + .expect("Should create metadata store!"), + quotaMaxBytes = config.storageQuota, + blockTtl = config.blockTtl) + + maintenance = BlockMaintainer.new( + repoStore, + interval = config.blockMaintenanceInterval, + numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks) - let - localStore = FSStore.new(repoDir, cache = cache) peerStore = PeerCtxStore.new() pendingBlocks = PendingBlocksManager.new() - blockDiscovery = DiscoveryEngine.new(localStore, peerStore, network, discovery, pendingBlocks) - engine = BlockExcEngine.new(localStore, wallet, network, blockDiscovery, peerStore, pendingBlocks) - store = NetworkStore.new(engine, localStore) - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) - contracts = ContractInteractions.new(config) - codexNode = CodexNodeRef.new(switch, store, engine, erasure, discovery, contracts) + blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) + engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks) + store = NetworkStore.new(engine, repoStore) + prover = if config.prover: + if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and + endsWith($config.circomR1cs, ".r1cs"): + error "Circom R1CS file not accessible" + raise (ref Defect)( + msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)") + + if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and + endsWith($config.circomWasm, ".wasm"): + error "Circom wasm file not accessible" + raise 
(ref Defect)( + msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)") + + let zkey = if not config.circomNoZkey: + if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and + endsWith($config.circomZkey, ".zkey"): + error "Circom zkey file not accessible" + raise (ref Defect)( + msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)") + + $config.circomZkey + else: "" + + some Prover.new( + store, + CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey), + config.numProofSamples) + else: + none Prover + + taskpool = Taskpool.new(num_threads = countProcessors()) + + codexNode = CodexNodeRef.new( + switch = switch, + networkStore = store, + engine = engine, + prover = prover, + discovery = discovery, + taskpool = taskpool) + restServer = RestServerRef.new( - codexNode.initRestApi(config), - initTAddress("127.0.0.1" , config.apiPort), + codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), + initTAddress(config.apiBindAddress , config.apiPort), bufferSize = (1024 * 64), maxRequestBodySize = int.high) .expect("Should start rest server!") switch.mount(network) - T( + + CodexServer( config: config, codexNode: codexNode, - restServer: restServer) + restServer: restServer, + repoStore: repoStore, + maintenance: maintenance, + taskpool: taskpool) diff --git a/codex/codextypes.nim b/codex/codextypes.nim new file mode 100644 index 00000000..2fd15d1e --- /dev/null +++ b/codex/codextypes.nim @@ -0,0 +1,113 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +{.push raises: [].} + +import std/tables +import std/sugar + +import pkg/libp2p/multicodec +import pkg/libp2p/multihash +import pkg/libp2p/cid +import pkg/results +import pkg/questionable/results + +import ./units +import ./errors + +export tables + +const + # Size of blocks for storage / network exchange, + DefaultBlockSize* = NBytes 1024*64 + DefaultCellSize* = NBytes 2048 + + # Proving defaults + DefaultMaxSlotDepth* = 32 + DefaultMaxDatasetDepth* = 8 + DefaultBlockDepth* = 5 + DefaultCellElms* = 67 + DefaultSamplesNum* = 5 + + # hashes + Sha256HashCodec* = multiCodec("sha2-256") + Sha512HashCodec* = multiCodec("sha2-512") + Pos2Bn128SpngCodec* = multiCodec("poseidon2-alt_bn_128-sponge-r2") + Pos2Bn128MrklCodec* = multiCodec("poseidon2-alt_bn_128-merkle-2kb") + + ManifestCodec* = multiCodec("codex-manifest") + DatasetRootCodec* = multiCodec("codex-root") + BlockCodec* = multiCodec("codex-block") + SlotRootCodec* = multiCodec("codex-slot-root") + SlotProvingRootCodec* = multiCodec("codex-proving-root") + CodexSlotCellCodec* = multiCodec("codex-slot-cell") + + CodexHashesCodecs* = [ + Sha256HashCodec, + Pos2Bn128SpngCodec, + Pos2Bn128MrklCodec + ] + + CodexPrimitivesCodecs* = [ + ManifestCodec, + DatasetRootCodec, + BlockCodec, + SlotRootCodec, + SlotProvingRootCodec, + CodexSlotCellCodec, + ] + +proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] = + ## Initialize padding blocks table + ## + ## TODO: Ideally this is done at compile time, but for now + ## we do it at runtime because of an `importc` error that is + ## coming from somewhere in MultiHash that I can't track down. + ## + + let + emptyData: seq[byte] = @[] + PadHashes = { + Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure, + Sha512HashCodec: ? 
MultiHash.digest($Sha512HashCodec, emptyData).mapFailure, + }.toTable + + var + table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]() + + for hcodec, mhash in PadHashes.pairs: + table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure + + success table + +proc emptyCid*( + version: CidVersion, + hcodec: MultiCodec, + dcodec: MultiCodec): ?!Cid = + ## Returns cid representing empty content, + ## given cid version, hash codec and data codec + ## + + var + table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid] + + once: + table = ? initEmptyCidTable() + + table[(version, hcodec, dcodec)].catch + +proc emptyDigest*( + version: CidVersion, + hcodec: MultiCodec, + dcodec: MultiCodec): ?!MultiHash = + ## Returns hash representing empty content, + ## given cid version, hash codec and data codec + ## + emptyCid(version, hcodec, dcodec) + .flatMap((cid: Cid) => cid.mhash.mapFailure) diff --git a/codex/conf.nim b/codex/conf.nim index 2e211d99..fb7548c7 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
-import pkg/upraises - -push: {.upraises: [].} +{.push raises: [].} import std/os import std/terminal @@ -17,37 +15,84 @@ import std/options import std/strutils import std/typetraits -import pkg/chronicles +import pkg/chronos import pkg/chronicles/helpers import pkg/chronicles/topics_registry import pkg/confutils/defs import pkg/confutils/std/net +import pkg/toml_serialization import pkg/metrics import pkg/metrics/chronos_httpserver import pkg/stew/shims/net as stewnet +import pkg/stew/shims/parseutils +import pkg/stew/byteutils import pkg/libp2p import pkg/ethers +import pkg/questionable +import pkg/questionable/results +import ./codextypes import ./discovery -import ./stores/cachestore +import ./logutils +import ./stores +import ./units +import ./utils -export DefaultCacheSizeMiB, net +export units, net, codextypes, logutils + +export + DefaultQuotaBytes, + DefaultBlockTtl, + DefaultBlockMaintenanceInterval, + DefaultNumberOfBlocksToMaintainPerInterval + +proc defaultDataDir*(): string = + let dataDir = when defined(windows): + "AppData" / "Roaming" / "Codex" + elif defined(macosx): + "Library" / "Application Support" / "Codex" + else: + ".cache" / "codex" + + getHomeDir() / dataDir + +const + codex_enable_api_debug_peers* {.booldefine.} = false + codex_enable_proof_failures* {.booldefine.} = false + codex_enable_log_counter* {.booldefine.} = false + + DefaultDataDir* = defaultDataDir() type - StartUpCommand* {.pure.} = enum - noCommand, - initNode + StartUpCmd* {.pure.} = enum + noCmd + persistence - LogKind* = enum + PersistenceCmd* {.pure.} = enum + noCmd + prover + + LogKind* {.pure.} = enum Auto = "auto" Colors = "colors" NoColors = "nocolors" Json = "json" None = "none" + RepoKind* = enum + repoFS = "fs" + repoSQLite = "sqlite" + repoLevelDb = "leveldb" + CodexConf* = object + configFile* {. + desc: "Loads the configuration from a TOML file" + defaultValueDesc: "none" + defaultValue: InputFile.none + name: "config-file" }: Option[InputFile] + logLevel* {. 
- defaultValue: "INFO" + defaultValue: "info" desc: "Sets the log level", name: "log-level" }: string @@ -75,86 +120,132 @@ type name: "metrics-port" }: Port dataDir* {. - desc: "The directory where codex will store configuration and data." - defaultValue: defaultDataDir() - defaultValueDesc: "" + desc: "The directory where codex will store configuration and data" + defaultValue: DefaultDataDir + defaultValueDesc: $DefaultDataDir abbr: "d" name: "data-dir" }: OutDir + listenAddrs* {. + desc: "Multi Addresses to listen on" + defaultValue: @[ + MultiAddress.init("/ip4/0.0.0.0/tcp/0") + .expect("Should init multiaddress")] + defaultValueDesc: "/ip4/0.0.0.0/tcp/0" + abbr: "i" + name: "listen-addrs" }: seq[MultiAddress] + + # TODO: change this once we integrate nat support + nat* {. + desc: "IP Addresses to announce behind a NAT" + defaultValue: ValidIpAddress.init("127.0.0.1") + defaultValueDesc: "127.0.0.1" + abbr: "a" + name: "nat" }: ValidIpAddress + + discoveryIp* {. + desc: "Discovery listen address" + defaultValue: ValidIpAddress.init(IPv4_any()) + defaultValueDesc: "0.0.0.0" + abbr: "e" + name: "disc-ip" }: ValidIpAddress + + discoveryPort* {. + desc: "Discovery (UDP) port" + defaultValue: 8090.Port + defaultValueDesc: "8090" + abbr: "u" + name: "disc-port" }: Port + + netPrivKeyFile* {. + desc: "Source of network (secp256k1) private key file path or name" + defaultValue: "key" + name: "net-privkey" }: string + + bootstrapNodes* {. + desc: "Specifies one or more bootstrap nodes to use when connecting to the network" + abbr: "b" + name: "bootstrap-node" }: seq[SignedPeerRecord] + + maxPeers* {. + desc: "The maximum number of peers to connect to" + defaultValue: 160 + name: "max-peers" }: int + + agentString* {. + defaultValue: "Codex" + desc: "Node agent string which is used as identifier in network" + name: "agent-string" }: string + + apiBindAddress* {. 
+ desc: "The REST API bind address" + defaultValue: "127.0.0.1" + name: "api-bindaddr" + }: string + + apiPort* {. + desc: "The REST Api port", + defaultValue: 8080.Port + defaultValueDesc: "8080" + name: "api-port" + abbr: "p" }: Port + + apiCorsAllowedOrigin* {. + desc: "The REST Api CORS allowed origin for downloading data. '*' will allow all origins, '' will allow none.", + defaultValue: string.none + defaultValueDesc: "Disallow all cross origin requests to download data" + name: "api-cors-origin" }: Option[string] + + repoKind* {. + desc: "Backend for main repo store (fs, sqlite, leveldb)" + defaultValueDesc: "fs" + defaultValue: repoFS + name: "repo-kind" }: RepoKind + + storageQuota* {. + desc: "The size of the total storage quota dedicated to the node" + defaultValue: DefaultQuotaBytes + defaultValueDesc: $DefaultQuotaBytes + name: "storage-quota" + abbr: "q" }: NBytes + + blockTtl* {. + desc: "Default block timeout in seconds - 0 disables the ttl" + defaultValue: DefaultBlockTtl + defaultValueDesc: $DefaultBlockTtl + name: "block-ttl" + abbr: "t" }: Duration + + blockMaintenanceInterval* {. + desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup" + defaultValue: DefaultBlockMaintenanceInterval + defaultValueDesc: $DefaultBlockMaintenanceInterval + name: "block-mi" }: Duration + + blockMaintenanceNumberOfBlocks* {. + desc: "Number of blocks to check every maintenance cycle" + defaultValue: DefaultNumberOfBlocksToMaintainPerInterval + defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval + name: "block-mn" }: int + + cacheSize* {. + desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives" + defaultValue: 0 + defaultValueDesc: "0" + name: "cache-size" + abbr: "c" }: NBytes + + logFile* {. + desc: "Logs to file" + defaultValue: string.none + name: "log-file" + hidden + .}: Option[string] + case cmd* {. 
- command - defaultValue: noCommand }: StartUpCommand - - of noCommand: - listenAddrs* {. - desc: "Multi Addresses to listen on" - defaultValue: @[ - MultiAddress.init("/ip4/0.0.0.0/tcp/0") - .expect("Should init multiaddress")] - defaultValueDesc: "/ip4/0.0.0.0/tcp/0" - abbr: "i" - name: "listen-addrs" }: seq[MultiAddress] - - nat* {. - # TODO: change this once we integrate nat support - desc: "IP Addresses to announce behind a NAT" - defaultValue: ValidIpAddress.init("127.0.0.1") - defaultValueDesc: "127.0.0.1" - abbr: "a" - name: "nat" }: ValidIpAddress - - discoveryIp* {. - desc: "Discovery listen address" - defaultValue: ValidIpAddress.init(IPv4_any()) - defaultValueDesc: "0.0.0.0" - name: "disc-ip" }: ValidIpAddress - - discoveryPort* {. - desc: "Discovery (UDP) port" - defaultValue: Port(8090) - defaultValueDesc: "8090" - name: "disc-port" }: Port - - netPrivKeyFile* {. - desc: "Source of network (secp256k1) private key file path or name" - defaultValue: "key" - name: "net-privkey" }: string - - bootstrapNodes* {. - desc: "Specifies one or more bootstrap nodes to use when connecting to the network." - abbr: "b" - name: "bootstrap-node" }: seq[SignedPeerRecord] - - maxPeers* {. - desc: "The maximum number of peers to connect to" - defaultValue: 160 - name: "max-peers" }: int - - agentString* {. - defaultValue: "Codex" - desc: "Node agent string which is used as identifier in network" - name: "agent-string" }: string - - apiPort* {. - desc: "The REST Api port", - defaultValue: 8080 - defaultValueDesc: "8080" - name: "api-port" - abbr: "p" }: int - - cacheSize* {. - desc: "The size in MiB of the block cache, 0 disables the cache - might help on slow hardrives" - defaultValue: 0 - defaultValueDesc: "0" - name: "cache-size" - abbr: "c" }: Natural - - persistence* {. 
- desc: "Enables persistence mechanism, requires an Ethereum node" - defaultValue: false - name: "persistence" - .}: bool - + defaultValue: noCmd + command }: StartUpCmd + of persistence: ethProvider* {. desc: "The URL of the JSON-RPC API of the Ethereum node" defaultValue: "ws://localhost:8545" @@ -164,66 +255,244 @@ type ethAccount* {. desc: "The Ethereum account that is used for storage contracts" defaultValue: EthAddress.none + defaultValueDesc: "" name: "eth-account" .}: Option[EthAddress] - ethDeployment* {. - desc: "The json file describing the contract deployment" + ethPrivateKey* {. + desc: "File containing Ethereum private key for storage contracts" defaultValue: string.none - name: "eth-deployment" + defaultValueDesc: "" + name: "eth-private-key" .}: Option[string] - of initNode: - discard + marketplaceAddress* {. + desc: "Address of deployed Marketplace contract" + defaultValue: EthAddress.none + defaultValueDesc: "" + name: "marketplace-address" + .}: Option[EthAddress] + + # TODO: should go behind a feature flag + simulateProofFailures* {. + desc: "Simulates proof failures once every N proofs. 0 = disabled." + defaultValue: 0 + name: "simulate-proof-failures" + hidden + .}: int + + validator* {. + desc: "Enables validator, requires an Ethereum node" + defaultValue: false + name: "validator" + .}: bool + + validatorMaxSlots* {. + desc: "Maximum number of slots that the validator monitors" + defaultValue: 1000 + name: "validator-max-slots" + .}: int + + case persistenceCmd* {. + defaultValue: noCmd + command }: PersistenceCmd + + of PersistenceCmd.prover: + circomR1cs* {. + desc: "The r1cs file for the storage circuit" + defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs" + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs" + name: "circom-r1cs" + .}: InputFile + + circomWasm* {. 
+ desc: "The wasm file for the storage circuit" + defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm" + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm" + name: "circom-wasm" + .}: InputFile + + circomZkey* {. + desc: "The zkey file for the storage circuit" + defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey" + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey" + name: "circom-zkey" + .}: InputFile + + # TODO: should probably be hidden and behind a feature flag + circomNoZkey* {. + desc: "Ignore the zkey file - use only for testing!" + defaultValue: false + name: "circom-no-zkey" + .}: bool + + numProofSamples* {. + desc: "Number of samples to prove" + defaultValue: DefaultSamplesNum + defaultValueDesc: $DefaultSamplesNum + name: "proof-samples" }: int + + maxSlotDepth* {. + desc: "The maximum depth of the slot tree" + defaultValue: DefaultMaxSlotDepth + defaultValueDesc: $DefaultMaxSlotDepth + name: "max-slot-depth" }: int + + maxDatasetDepth* {. + desc: "The maximum depth of the dataset tree" + defaultValue: DefaultMaxDatasetDepth + defaultValueDesc: $DefaultMaxDatasetDepth + name: "max-dataset-depth" }: int + + maxBlockDepth* {. + desc: "The maximum depth of the network block merkle tree" + defaultValue: DefaultBlockDepth + defaultValueDesc: $DefaultBlockDepth + name: "max-block-depth" }: int + + maxCellElms* {. 
+ desc: "The maximum number of elements in a cell" + defaultValue: DefaultCellElms + defaultValueDesc: $DefaultCellElms + name: "max-cell-elements" }: int + of PersistenceCmd.noCmd: + discard + + of StartUpCmd.noCmd: + discard # end of persistence EthAddress* = ethers.Address +logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog +logutils.formatIt(LogFormat.json, EthAddress): %it + +func persistence*(self: CodexConf): bool = + self.cmd == StartUpCmd.persistence + +func prover*(self: CodexConf): bool = + self.persistence and self.persistenceCmd == PersistenceCmd.prover + +proc getCodexVersion(): string = + let tag = strip(staticExec("git tag")) + if tag.isEmptyOrWhitespace: + return "untagged build" + return tag + +proc getCodexRevision(): string = + # using a slice in a static context breaks nimsuggest for some reason + var res = strip(staticExec("git rev-parse --short HEAD")) + return res + +proc getNimBanner(): string = + staticExec("nim --version | grep Version") + const - gitRevision* = strip(staticExec("git rev-parse --short HEAD"))[0..5] - - nimBanner* = staticExec("nim --version | grep Version") - - #TODO add versionMajor, Minor & Fix when we switch to semver - codexVersion* = gitRevision + codexVersion* = getCodexVersion() + codexRevision* = getCodexRevision() + nimBanner* = getNimBanner() codexFullVersion* = - "Codex build " & codexVersion & "\p" & + "Codex version: " & codexVersion & "\p" & + "Codex revision: " & codexRevision & "\p" & nimBanner -proc defaultDataDir*(): string = - let dataDir = when defined(windows): - "AppData" / "Roaming" / "Codex" - elif defined(macosx): - "Library" / "Application Support" / "Codex" +proc parseCmdArg*(T: typedesc[MultiAddress], + input: string): MultiAddress + {.upraises: [ValueError, LPError].} = + var ma: MultiAddress + let res = MultiAddress.init(input) + if res.isOk: + ma = res.get() else: - ".cache" / "codex" + warn "Invalid MultiAddress", input=input, error = res.error() + quit QuitFailure + ma - 
getHomeDir() / dataDir - -func parseCmdArg*(T: type MultiAddress, input: TaintedString): T - {.raises: [ValueError, LPError, Defect].} = - MultiAddress.init($input).tryGet() - -proc parseCmdArg*(T: type SignedPeerRecord, uri: TaintedString): T = +proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = var res: SignedPeerRecord try: if not res.fromURI(uri): - warn "Invalid SignedPeerRecord uri", uri=uri + warn "Invalid SignedPeerRecord uri", uri = uri quit QuitFailure except CatchableError as exc: - warn "Invalid SignedPeerRecord uri", uri=uri, error=exc.msg + warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg quit QuitFailure res -func parseCmdArg*(T: type EthAddress, address: TaintedString): T = +proc parseCmdArg*(T: type EthAddress, address: string): T = EthAddress.init($address).get() +proc parseCmdArg*(T: type NBytes, val: string): T = + var num = 0'i64 + let count = parseSize(val, num, alwaysBin = true) + if count == 0: + warn "Invalid number of bytes", nbytes = val + quit QuitFailure + NBytes(num) + +proc parseCmdArg*(T: type Duration, val: string): T = + var dur: Duration + let count = parseDuration(val, dur) + if count == 0: + warn "Cannot parse duration", dur = dur + quit QuitFailure + dur + +proc readValue*(r: var TomlReader, val: var EthAddress) + {.upraises: [SerializationError, IOError].} = + val = EthAddress.init(r.readValue(string)).get() + +proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = + without uri =? r.readValue(string).catch, err: + error "invalid SignedPeerRecord configuration value", error = err.msg + quit QuitFailure + + val = SignedPeerRecord.parseCmdArg(uri) + +proc readValue*(r: var TomlReader, val: var MultiAddress) = + without input =? 
r.readValue(string).catch, err: + error "invalid MultiAddress configuration value", error = err.msg + quit QuitFailure + + let res = MultiAddress.init(input) + if res.isOk: + val = res.get() + else: + warn "Invalid MultiAddress", input=input, error=res.error() + quit QuitFailure + +proc readValue*(r: var TomlReader, val: var NBytes) + {.upraises: [SerializationError, IOError].} = + var value = 0'i64 + var str = r.readValue(string) + let count = parseSize(str, value, alwaysBin = true) + if count == 0: + error "invalid number of bytes for configuration value", value = str + quit QuitFailure + val = NBytes(value) + +proc readValue*(r: var TomlReader, val: var Duration) + {.upraises: [SerializationError, IOError].} = + var str = r.readValue(string) + var dur: Duration + let count = parseDuration(str, dur) + if count == 0: + error "Invalid duration parse", value = str + quit QuitFailure + val = dur + # no idea why confutils needs this: -proc completeCmdArg*(T: type EthAddress; val: TaintedString): seq[string] = +proc completeCmdArg*(T: type EthAddress; val: string): seq[string] = + discard + +proc completeCmdArg*(T: type NBytes; val: string): seq[string] = + discard + +proc completeCmdArg*(T: type Duration; val: string): seq[string] = discard # silly chronicles, colors is a compile-time property -proc stripAnsi(v: string): string = +proc stripAnsi*(v: string): string = var res = newStringOfCap(v.len) i: int @@ -258,13 +527,13 @@ proc stripAnsi(v: string): string = res -proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} = +proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = # Updates log levels (without clearing old ones) let directives = logLevel.split(";") try: - setLogLevel(parseEnum[LogLevel](directives[0])) + setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii)) except ValueError: - raise (ref ValueError)(msg: "Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL") + raise (ref ValueError)(msg: "Please 
specify one of: trace, debug, info, notice, warn, error or fatal") if directives.len > 1: for topicName, settings in parseTopicDirectives(directives[1..^1]): @@ -272,9 +541,10 @@ proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} = warn "Unrecognized logging topic", topic = topicName proc setupLogging*(conf: CodexConf) = - when defaultChroniclesStream.outputs.type.arity != 2: + when defaultChroniclesStream.outputs.type.arity != 3: warn "Logging configuration options not enabled in the current build" else: + var logFile: ?IoHandle proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard proc writeAndFlush(f: File, msg: LogOutputStr) = try: @@ -289,9 +559,28 @@ proc setupLogging*(conf: CodexConf) = proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) = writeAndFlush(stdout, stripAnsi(msg)) + proc fileFlush(logLevel: LogLevel, msg: LogOutputStr) = + if file =? logFile: + if error =? file.writeFile(stripAnsi(msg).toBytes).errorOption: + error "failed to write to log file", errorCode = $error + + defaultChroniclesStream.outputs[2].writer = noOutput + if logFilePath =? 
conf.logFile and logFilePath.len > 0: + let logFileHandle = openFile( + logFilePath, + {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate} + ) + if logFileHandle.isErr: + error "failed to open log file", + path = logFilePath, + errorCode = $logFileHandle.error + else: + logFile = logFileHandle.option + defaultChroniclesStream.outputs[2].writer = fileFlush + defaultChroniclesStream.outputs[1].writer = noOutput - defaultChroniclesStream.outputs[0].writer = + let writer = case conf.logFormat: of LogKind.Auto: if isatty(stdout): @@ -306,6 +595,16 @@ proc setupLogging*(conf: CodexConf) = of LogKind.None: noOutput + when codex_enable_log_counter: + var counter = 0.uint64 + proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) = + inc(counter) + let withoutNewLine = msg[0..^2] + writer(logLevel, withoutNewLine & " count=" & $counter & "\n") + defaultChroniclesStream.outputs[0].writer = numberedWriter + else: + defaultChroniclesStream.outputs[0].writer = writer + try: updateLogLevel(conf.logLevel) except ValueError as err: diff --git a/codex/contracts.nim b/codex/contracts.nim index c1c4f7c7..ecf298f4 100644 --- a/codex/contracts.nim +++ b/codex/contracts.nim @@ -1,13 +1,9 @@ import contracts/requests -import contracts/storage -import contracts/deployment +import contracts/marketplace import contracts/market -import contracts/proofs import contracts/interactions export requests -export storage -export deployment +export marketplace export market -export proofs export interactions diff --git a/codex/contracts/Readme.md b/codex/contracts/Readme.md index 6a4ea8d0..cae2a4cc 100644 --- a/codex/contracts/Readme.md +++ b/codex/contracts/Readme.md @@ -20,7 +20,7 @@ import ethers let address = # fill in address where the contract was deployed let provider = JsonRpcProvider.new("ws://localhost:8545") -let storage = Storage.new(address, provider) +let marketplace = Marketplace.new(address, provider) ``` Setup client and host so that they can sign transactions; here we use 
the first @@ -32,36 +32,6 @@ let client = provider.getSigner(accounts[0]) let host = provider.getSigner(accounts[1]) ``` -Collateral ----------- - -Hosts need to put up collateral before participating in storage contracts. - -A host can learn about the amount of collateral that is required: -```nim -let collateralAmount = await storage.collateralAmount() -``` - -The host then needs to prepare a payment to the smart contract by calling the -`approve` method on the [ERC20 token][2]. Note that interaction with ERC20 -contracts is not part of this library. - -After preparing the payment, the host can deposit collateral: -```nim -await storage - .connect(host) - .deposit(collateralAmount) -``` - -When a host is not participating in storage offers or contracts, it can withdraw -its collateral: - -``` -await storage - .connect(host) - .withdraw() -``` - Storage requests ---------------- @@ -82,9 +52,7 @@ let request : StorageRequest = ( When a client wants to submit this request to the network, it needs to pay the maximum price to the smart contract in advance. The difference between the -maximum price and the offered price will be reimbursed later. To prepare, the -client needs to call the `approve` method on the [ERC20 token][2]. Note that -interaction with ERC20 contracts is not part of this library. +maximum price and the offered price will be reimbursed later. Once the payment has been prepared, the client can submit the request to the network: @@ -151,7 +119,7 @@ Storage proofs Time is divided into periods, and each period a storage proof may be required from the host. The odds of requiring a storage proof are negotiated through the storage request. For more details about the timing of storage proofs, please -refer to the [design document][3]. +refer to the [design document][2]. 
At the start of each period of time, the host can check whether a storage proof is required: @@ -176,6 +144,5 @@ await storage .markProofAsMissing(id, period) ``` -[1]: https://github.com/status-im/dagger-contracts/ -[2]: https://ethereum.org/en/developers/docs/standards/tokens/erc-20/ -[3]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md +[1]: https://github.com/status-im/codex-contracts-eth/ +[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index ed8e5ef7..937745bf 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -3,41 +3,69 @@ import pkg/ethers import pkg/chronos import pkg/stint import ../clock +import ../conf export clock +logScope: + topics = "contracts clock" + type OnChainClock* = ref object of Clock provider: Provider subscription: Subscription offset: times.Duration + blockNumber: UInt256 started: bool + newBlock: AsyncEvent proc new*(_: type OnChainClock, provider: Provider): OnChainClock = - OnChainClock(provider: provider) + OnChainClock(provider: provider, newBlock: newAsyncEvent()) -proc start*(clock: OnChainClock) {.async.} = - if clock.started: - return - clock.started = true - - proc onBlock(blck: Block) {.async, upraises:[].} = +proc update(clock: OnChainClock, blck: Block) = + if number =? blck.number and number > clock.blockNumber: let blockTime = initTime(blck.timestamp.truncate(int64), 0) let computerTime = getTime() clock.offset = blockTime - computerTime + clock.blockNumber = number + trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset + clock.newBlock.fire() - if latestBlock =? (await clock.provider.getBlock(BlockTag.latest)): - await onBlock(latestBlock) +proc update(clock: OnChainClock) {.async.} = + try: + if latest =? 
(await clock.provider.getBlock(BlockTag.latest)): + clock.update(latest) + except CancelledError as error: + raise error + except CatchableError as error: + debug "error updating clock: ", error=error.msg + discard + +method start*(clock: OnChainClock) {.async.} = + if clock.started: + return + + proc onBlock(_: Block) = + # ignore block parameter; hardhat may call this with pending blocks + asyncSpawn clock.update() + + await clock.update() clock.subscription = await clock.provider.subscribe(onBlock) + clock.started = true -proc stop*(clock: OnChainClock) {.async.} = +method stop*(clock: OnChainClock) {.async.} = if not clock.started: return - clock.started = false await clock.subscription.unsubscribe() + clock.started = false method now*(clock: OnChainClock): SecondsSince1970 = doAssert clock.started, "clock should be started before calling now()" - toUnix(getTime() + clock.offset) + return toUnix(getTime() + clock.offset) + +method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} = + while (let difference = time - clock.now(); difference > 0): + clock.newBlock.clear() + discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference)) diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim new file mode 100644 index 00000000..76e00207 --- /dev/null +++ b/codex/contracts/config.nim @@ -0,0 +1,78 @@ +import pkg/contractabi +import pkg/ethers/fields +import pkg/questionable/results + +export contractabi + +type + MarketplaceConfig* = object + collateral*: CollateralConfig + proofs*: ProofConfig + CollateralConfig* = object + repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed + maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value + slashCriterion*: uint16 # amount of proofs missed that lead to slashing + slashPercentage*: uint8 # percentage of the collateral that is slashed + ProofConfig* = object + period*: UInt256 # proofs requirements are 
calculated per period (in seconds) + timeout*: UInt256 # mark proofs as missing before the timeout (in seconds) + downtime*: uint8 # ignore this much recent blocks for proof requirements + zkeyHash*: string # hash of the zkey file which is linked to the verifier + # Ensures the pointer does not remain in downtime for many consecutive + # periods. For each period increase, move the pointer `pointerProduct` + # blocks. Should be a prime number to ensure there are no cycles. + downtimeProduct*: uint8 + + +func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = + ProofConfig( + period: tupl[0], + timeout: tupl[1], + downtime: tupl[2], + zkeyHash: tupl[3], + downtimeProduct: tupl[4] + ) + +func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = + CollateralConfig( + repairRewardPercentage: tupl[0], + maxNumberOfSlashes: tupl[1], + slashCriterion: tupl[2], + slashPercentage: tupl[3] + ) + +func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig = + MarketplaceConfig( + collateral: tupl[0], + proofs: tupl[1] + ) + +func solidityType*(_: type ProofConfig): string = + solidityType(ProofConfig.fieldTypes) + +func solidityType*(_: type CollateralConfig): string = + solidityType(CollateralConfig.fieldTypes) + +func solidityType*(_: type MarketplaceConfig): string = + solidityType(CollateralConfig.fieldTypes) + +func encode*(encoder: var AbiEncoder, slot: ProofConfig) = + encoder.write(slot.fieldValues) + +func encode*(encoder: var AbiEncoder, slot: CollateralConfig) = + encoder.write(slot.fieldValues) + +func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) = + encoder.write(slot.fieldValues) + +func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T = + let tupl = ?decoder.read(ProofConfig.fieldTypes) + success ProofConfig.fromTuple(tupl) + +func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T = + let tupl = ?decoder.read(CollateralConfig.fieldTypes) + success CollateralConfig.fromTuple(tupl) + +func 
decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T = + let tupl = ?decoder.read(MarketplaceConfig.fieldTypes) + success MarketplaceConfig.fromTuple(tupl) diff --git a/codex/contracts/deployment.nim b/codex/contracts/deployment.nim index 2b95a651..b5b2e311 100644 --- a/codex/contracts/deployment.nim +++ b/codex/contracts/deployment.nim @@ -1,26 +1,43 @@ -import std/json import std/os +import std/tables import pkg/ethers import pkg/questionable -type Deployment* = object - json: JsonNode +import ../conf +import ../logutils +import ./marketplace -const defaultFile = "vendor" / "dagger-contracts" / "deployment-localhost.json" +type Deployment* = ref object + provider: Provider + config: CodexConf -## Reads deployment information from a json file. It expects a file that has -## been exported with Hardhat deploy. -## See also: -## https://github.com/wighawag/hardhat-deploy/tree/master#6-hardhat-export -proc deployment*(file = defaultFile): Deployment = - Deployment(json: parseFile(file)) +const knownAddresses = { + # Hardhat localhost network + "31337": { + "Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"), + }.toTable, + # Taiko Alpha-3 Testnet + "167005": { + "Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F") + }.toTable +}.toTable -proc address*(deployment: Deployment, Contract: typedesc): ?Address = - if deployment.json == nil: +proc getKnownAddress(T: type, chainId: UInt256): ?Address = + let id = chainId.toString(10) + notice "Looking for well-known contract address with ChainID ", chainId=id + + if not (id in knownAddresses): return none Address - try: - let address = deployment.json["contracts"][$Contract]["address"].getStr() - Address.init(address) - except KeyError: - none Address + return knownAddresses[id].getOrDefault($T, Address.none) + +proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment = + Deployment(provider: provider, config: config) + +proc address*(deployment: 
Deployment, contract: type): Future[?Address] {.async.} = + when contract is Marketplace: + if address =? deployment.config.marketplaceAddress: + return some address + + let chainId = await deployment.provider.getChainId() + return contract.getKnownAddress(chainId) diff --git a/codex/contracts/interactions.nim b/codex/contracts/interactions.nim index 5f4dcd9c..13eae8a0 100644 --- a/codex/contracts/interactions.nim +++ b/codex/contracts/interactions.nim @@ -1,78 +1,9 @@ -import pkg/ethers -import pkg/chronicles -import ../purchasing -import ../sales -import ../proving -import ./deployment -import ./storage -import ./market -import ./proofs -import ./clock +import ./interactions/interactions +import ./interactions/hostinteractions +import ./interactions/clientinteractions +import ./interactions/validatorinteractions -export purchasing -export sales -export proving -export chronicles - -type - ContractInteractions* = ref object - purchasing*: Purchasing - sales*: Sales - proving*: Proving - clock: OnChainClock - -proc new*(_: type ContractInteractions, - signer: Signer, - deployment: Deployment): ?ContractInteractions = - - without address =? 
deployment.address(Storage): - error "Unable to determine address of the Storage smart contract" - return none ContractInteractions - - let contract = Storage.new(address, signer) - let market = OnChainMarket.new(contract) - let proofs = OnChainProofs.new(contract) - let clock = OnChainClock.new(signer.provider) - let proving = Proving.new(proofs, clock) - some ContractInteractions( - purchasing: Purchasing.new(market, clock), - sales: Sales.new(market, clock, proving), - proving: proving, - clock: clock - ) - -proc new*(_: type ContractInteractions, - providerUrl: string, - account: Address, - deploymentFile: string = string.default): ?ContractInteractions = - - let provider = JsonRpcProvider.new(providerUrl) - let signer = provider.getSigner(account) - - var deploy: Deployment - try: - if deploymentFile == string.default: - deploy = deployment() - else: - deploy = deployment(deploymentFile) - except IOError as e: - error "Unable to read deployment json", msg = e.msg - return none ContractInteractions - - ContractInteractions.new(signer, deploy) - -proc new*(_: type ContractInteractions, - account: Address): ?ContractInteractions = - ContractInteractions.new("ws://localhost:8545", account) - -proc start*(interactions: ContractInteractions) {.async.} = - await interactions.clock.start() - await interactions.sales.start() - await interactions.proving.start() - await interactions.purchasing.start() - -proc stop*(interactions: ContractInteractions) {.async.} = - await interactions.purchasing.stop() - await interactions.sales.stop() - await interactions.proving.stop() - await interactions.clock.stop() +export interactions +export hostinteractions +export clientinteractions +export validatorinteractions diff --git a/codex/contracts/interactions/clientinteractions.nim b/codex/contracts/interactions/clientinteractions.nim new file mode 100644 index 00000000..78b3bedf --- /dev/null +++ b/codex/contracts/interactions/clientinteractions.nim @@ -0,0 +1,27 @@ +import 
pkg/ethers + +import ../../purchasing +import ../../logutils +import ../market +import ../clock +import ./interactions + +export purchasing +export logutils + +type + ClientInteractions* = ref object of ContractInteractions + purchasing*: Purchasing + +proc new*(_: type ClientInteractions, + clock: OnChainClock, + purchasing: Purchasing): ClientInteractions = + ClientInteractions(clock: clock, purchasing: purchasing) + +proc start*(self: ClientInteractions) {.async.} = + await procCall ContractInteractions(self).start() + await self.purchasing.start() + +proc stop*(self: ClientInteractions) {.async.} = + await self.purchasing.stop() + await procCall ContractInteractions(self).stop() diff --git a/codex/contracts/interactions/hostinteractions.nim b/codex/contracts/interactions/hostinteractions.nim new file mode 100644 index 00000000..2decfa44 --- /dev/null +++ b/codex/contracts/interactions/hostinteractions.nim @@ -0,0 +1,29 @@ +import pkg/chronos + +import ../../logutils +import ../../sales +import ./interactions + +export sales +export logutils + +type + HostInteractions* = ref object of ContractInteractions + sales*: Sales + +proc new*( + _: type HostInteractions, + clock: Clock, + sales: Sales +): HostInteractions = + ## Create a new HostInteractions instance + ## + HostInteractions(clock: clock, sales: sales) + +method start*(self: HostInteractions) {.async.} = + await procCall ContractInteractions(self).start() + await self.sales.start() + +method stop*(self: HostInteractions) {.async.} = + await self.sales.stop() + await procCall ContractInteractions(self).start() diff --git a/codex/contracts/interactions/interactions.nim b/codex/contracts/interactions/interactions.nim new file mode 100644 index 00000000..d4fddf54 --- /dev/null +++ b/codex/contracts/interactions/interactions.nim @@ -0,0 +1,16 @@ +import pkg/ethers +import ../clock +import ../marketplace +import ../market + +export clock + +type + ContractInteractions* = ref object of RootObj + clock*: Clock + 
+method start*(self: ContractInteractions) {.async, base.} = + discard + +method stop*(self: ContractInteractions) {.async, base.} = + discard diff --git a/codex/contracts/interactions/validatorinteractions.nim b/codex/contracts/interactions/validatorinteractions.nim new file mode 100644 index 00000000..1aa4026c --- /dev/null +++ b/codex/contracts/interactions/validatorinteractions.nim @@ -0,0 +1,21 @@ +import ./interactions +import ../../validation + +export validation + +type + ValidatorInteractions* = ref object of ContractInteractions + validation: Validation + +proc new*(_: type ValidatorInteractions, + clock: OnChainClock, + validation: Validation): ValidatorInteractions = + ValidatorInteractions(clock: clock, validation: validation) + +proc start*(self: ValidatorInteractions) {.async.} = + await procCall ContractInteractions(self).start() + await self.validation.start() + +proc stop*(self: ValidatorInteractions) {.async.} = + await self.validation.stop() + await procCall ContractInteractions(self).stop() diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 2ed4148f..c874d5db 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -1,99 +1,282 @@ +import std/sequtils import std/strutils +import std/sugar import pkg/ethers -import pkg/ethers/testing import pkg/upraises import pkg/questionable +import ../utils/exceptions +import ../logutils import ../market -import ./storage +import ./marketplace +import ./proofs export market +logScope: + topics = "marketplace onchain market" + type OnChainMarket* = ref object of Market - contract: Storage + contract: Marketplace signer: Signer MarketSubscription = market.Subscription EventSubscription = ethers.Subscription OnChainMarketSubscription = ref object of MarketSubscription eventSubscription: EventSubscription -func new*(_: type OnChainMarket, contract: Storage): OnChainMarket = +func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket = without signer =? 
contract.signer: - raiseAssert("Storage contract should have a signer") + raiseAssert("Marketplace contract should have a signer") OnChainMarket( contract: contract, signer: signer, ) +proc raiseMarketError(message: string) {.raises: [MarketError].} = + raise newException(MarketError, message) + +template convertEthersError(body) = + try: + body + except EthersError as error: + raiseMarketError(error.msgDetail) + +proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = + debug "Approving tokens", amount + convertEthersError: + let tokenAddress = await market.contract.token() + let token = Erc20Token.new(tokenAddress, market.signer) + discard await token.increaseAllowance(market.contract.address(), amount).confirm(0) + +method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} = + let config = await market.contract.config() + return some config.proofs.zkeyHash + method getSigner*(market: OnChainMarket): Future[Address] {.async.} = - return await market.signer.getAddress() + convertEthersError: + return await market.signer.getAddress() + +method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = + convertEthersError: + let config = await market.contract.config() + let period = config.proofs.period + return Periodicity(seconds: period) + +method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} = + convertEthersError: + let config = await market.contract.config() + return config.proofs.timeout + +method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = + convertEthersError: + let config = await market.contract.config() + return config.proofs.downtime + +method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} = + convertEthersError: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.getPointer(slotId, overrides) method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = - return await 
market.contract.myRequests + convertEthersError: + return await market.contract.myRequests + +method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = + convertEthersError: + let slots = await market.contract.mySlots() + debug "Fetched my slots", numSlots=len(slots) + + return slots method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} = - await market.contract.requestStorage(request) + convertEthersError: + debug "Requesting storage" + await market.approveFunds(request.price()) + discard await market.contract.requestStorage(request).confirm(0) method getRequest(market: OnChainMarket, id: RequestId): Future[?StorageRequest] {.async.} = - try: - return some await market.contract.getRequest(id) - except ProviderError as e: - if e.revertReason.contains("Unknown request"): - return none StorageRequest - raise e + convertEthersError: + try: + return some await market.contract.getRequest(id) + except ProviderError as e: + if e.msgDetail.contains("Unknown request"): + return none StorageRequest + raise e -method getState*(market: OnChainMarket, - requestId: RequestId): Future[?RequestState] {.async.} = - try: - return some await market.contract.state(requestId) - except ProviderError as e: - if e.revertReason.contains("Unknown request"): - return none RequestState - raise e +method requestState*(market: OnChainMarket, + requestId: RequestId): Future[?RequestState] {.async.} = + convertEthersError: + try: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return some await market.contract.requestState(requestId, overrides) + except ProviderError as e: + if e.msgDetail.contains("Unknown request"): + return none RequestState + raise e + +method slotState*(market: OnChainMarket, + slotId: SlotId): Future[SlotState] {.async.} = + convertEthersError: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.slotState(slotId, overrides) method getRequestEnd*(market: OnChainMarket, id: 
RequestId): Future[SecondsSince1970] {.async.} = - return await market.contract.requestEnd(id) + convertEthersError: + return await market.contract.requestEnd(id) + +method requestExpiresAt*(market: OnChainMarket, + id: RequestId): Future[SecondsSince1970] {.async.} = + convertEthersError: + return await market.contract.requestExpiry(id) method getHost(market: OnChainMarket, requestId: RequestId, slotIndex: UInt256): Future[?Address] {.async.} = - let slotId = slotId(requestId, slotIndex) - let address = await market.contract.getHost(slotId) - if address != Address.default: - return some address - else: - return none Address + convertEthersError: + let slotId = slotId(requestId, slotIndex) + let address = await market.contract.getHost(slotId) + if address != Address.default: + return some address + else: + return none Address + +method getActiveSlot*(market: OnChainMarket, + slotId: SlotId): Future[?Slot] {.async.} = + convertEthersError: + try: + return some await market.contract.getActiveSlot(slotId) + except ProviderError as e: + if e.msgDetail.contains("Slot is free"): + return none Slot + raise e method fillSlot(market: OnChainMarket, requestId: RequestId, slotIndex: UInt256, - proof: seq[byte]) {.async.} = - await market.contract.fillSlot(requestId, slotIndex, proof) + proof: Groth16Proof, + collateral: UInt256) {.async.} = + convertEthersError: + await market.approveFunds(collateral) + discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(0) + +method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = + convertEthersError: + discard await market.contract.freeSlot(slotId).confirm(0) method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = - await market.contract.withdrawFunds(requestId) + convertEthersError: + discard await market.contract.withdrawFunds(requestId).confirm(0) -method subscribeRequests(market: OnChainMarket, +method isProofRequired*(market: OnChainMarket, + id: SlotId): Future[bool] {.async.} 
= + convertEthersError: + try: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.isProofRequired(id, overrides) + except ProviderError as e: + if e.msgDetail.contains("Slot is free"): + return false + raise e + +method willProofBeRequired*(market: OnChainMarket, + id: SlotId): Future[bool] {.async.} = + convertEthersError: + try: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.willProofBeRequired(id, overrides) + except ProviderError as e: + if e.msgDetail.contains("Slot is free"): + return false + raise e + +method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} = + convertEthersError: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.getChallenge(id, overrides) + +method submitProof*(market: OnChainMarket, + id: SlotId, + proof: Groth16Proof) {.async.} = + convertEthersError: + discard await market.contract.submitProof(id, proof).confirm(0) + +method markProofAsMissing*(market: OnChainMarket, + id: SlotId, + period: Period) {.async.} = + convertEthersError: + discard await market.contract.markProofAsMissing(id, period).confirm(0) + +method canProofBeMarkedAsMissing*( + market: OnChainMarket, + id: SlotId, + period: Period +): Future[bool] {.async.} = + let provider = market.contract.provider + let contractWithoutSigner = market.contract.connect(provider) + let overrides = CallOverrides(blockTag: some BlockTag.pending) + try: + discard await contractWithoutSigner.markProofAsMissing(id, period, overrides) + return true + except EthersError as e: + trace "Proof cannot be marked as missing", msg = e.msg + return false + +method subscribeRequests*(market: OnChainMarket, callback: OnRequest): Future[MarketSubscription] {.async.} = proc onEvent(event: StorageRequested) {.upraises:[].} = - callback(event.requestId, event.ask) - let subscription = await market.contract.subscribe(StorageRequested, 
onEvent) - return OnChainMarketSubscription(eventSubscription: subscription) + callback(event.requestId, + event.ask, + event.expiry) + + convertEthersError: + let subscription = await market.contract.subscribe(StorageRequested, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + +method subscribeSlotFilled*(market: OnChainMarket, + callback: OnSlotFilled): + Future[MarketSubscription] {.async.} = + proc onEvent(event: SlotFilled) {.upraises:[].} = + callback(event.requestId, event.slotIndex) + + convertEthersError: + let subscription = await market.contract.subscribe(SlotFilled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) method subscribeSlotFilled*(market: OnChainMarket, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled): Future[MarketSubscription] {.async.} = - proc onEvent(event: SlotFilled) {.upraises:[].} = - if event.requestId == requestId and event.slotIndex == slotIndex: - callback(event.requestId, event.slotIndex) - let subscription = await market.contract.subscribe(SlotFilled, onEvent) - return OnChainMarketSubscription(eventSubscription: subscription) + proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) = + if eventRequestId == requestId and eventSlotIndex == slotIndex: + callback(requestId, slotIndex) + + convertEthersError: + return await market.subscribeSlotFilled(onSlotFilled) + +method subscribeSlotFreed*(market: OnChainMarket, + callback: OnSlotFreed): + Future[MarketSubscription] {.async.} = + proc onEvent(event: SlotFreed) {.upraises:[].} = + callback(event.requestId, event.slotIndex) + + convertEthersError: + let subscription = await market.contract.subscribe(SlotFreed, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + +method subscribeFulfillment(market: OnChainMarket, + callback: OnFulfillment): + Future[MarketSubscription] {.async.} = + proc onEvent(event: RequestFulfilled) {.upraises:[].} = + callback(event.requestId) + 
+ convertEthersError: + let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) method subscribeFulfillment(market: OnChainMarket, requestId: RequestId, @@ -102,8 +285,20 @@ method subscribeFulfillment(market: OnChainMarket, proc onEvent(event: RequestFulfilled) {.upraises:[].} = if event.requestId == requestId: callback(event.requestId) - let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) - return OnChainMarketSubscription(eventSubscription: subscription) + + convertEthersError: + let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + +method subscribeRequestCancelled*(market: OnChainMarket, + callback: OnRequestCancelled): + Future[MarketSubscription] {.async.} = + proc onEvent(event: RequestCancelled) {.upraises:[].} = + callback(event.requestId) + + convertEthersError: + let subscription = await market.contract.subscribe(RequestCancelled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) method subscribeRequestCancelled*(market: OnChainMarket, requestId: RequestId, @@ -112,18 +307,63 @@ method subscribeRequestCancelled*(market: OnChainMarket, proc onEvent(event: RequestCancelled) {.upraises:[].} = if event.requestId == requestId: callback(event.requestId) - let subscription = await market.contract.subscribe(RequestCancelled, onEvent) - return OnChainMarketSubscription(eventSubscription: subscription) + + convertEthersError: + let subscription = await market.contract.subscribe(RequestCancelled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + +method subscribeRequestFailed*(market: OnChainMarket, + callback: OnRequestFailed): + Future[MarketSubscription] {.async.} = + proc onEvent(event: RequestFailed) {.upraises:[]} = + callback(event.requestId) + + convertEthersError: + let subscription = await 
market.contract.subscribe(RequestFailed, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) method subscribeRequestFailed*(market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed): Future[MarketSubscription] {.async.} = - proc onEvent(event: RequestFailed) {.upraises:[].} = + proc onEvent(event: RequestFailed) {.upraises:[]} = if event.requestId == requestId: callback(event.requestId) - let subscription = await market.contract.subscribe(RequestFailed, onEvent) - return OnChainMarketSubscription(eventSubscription: subscription) + + convertEthersError: + let subscription = await market.contract.subscribe(RequestFailed, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + +method subscribeProofSubmission*(market: OnChainMarket, + callback: OnProofSubmitted): + Future[MarketSubscription] {.async.} = + proc onEvent(event: ProofSubmitted) {.upraises: [].} = + callback(event.id) + + convertEthersError: + let subscription = await market.contract.subscribe(ProofSubmitted, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} = await subscription.eventSubscription.unsubscribe() + +method queryPastStorageRequests*(market: OnChainMarket, + blocksAgo: int): + Future[seq[PastStorageRequest]] {.async.} = + convertEthersError: + let contract = market.contract + let provider = contract.provider + + let head = await provider.getBlockNumber() + let fromBlock = BlockTag.init(head - blocksAgo.abs.u256) + + let events = await contract.queryFilter(StorageRequested, + fromBlock, + BlockTag.latest) + return events.map(event => + PastStorageRequest( + requestId: event.requestId, + ask: event.ask, + expiry: event.expiry + ) + ) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim new file mode 100644 index 00000000..301f8c25 --- /dev/null +++ b/codex/contracts/marketplace.nim @@ -0,0 +1,70 @@ +import 
pkg/ethers +import pkg/ethers/erc20 +import pkg/json_rpc/rpcclient +import pkg/stint +import pkg/chronos +import ../clock +import ./requests +import ./proofs +import ./config + +export stint +export ethers except `%`, `%*`, toJson +export erc20 except `%`, `%*`, toJson +export config +export requests + +type + Marketplace* = ref object of Contract + StorageRequested* = object of Event + requestId*: RequestId + ask*: StorageAsk + expiry*: UInt256 + SlotFilled* = object of Event + requestId* {.indexed.}: RequestId + slotIndex*: UInt256 + SlotFreed* = object of Event + requestId* {.indexed.}: RequestId + slotIndex*: UInt256 + RequestFulfilled* = object of Event + requestId* {.indexed.}: RequestId + RequestCancelled* = object of Event + requestId* {.indexed.}: RequestId + RequestFailed* = object of Event + requestId* {.indexed.}: RequestId + ProofSubmitted* = object of Event + id*: SlotId + + +proc config*(marketplace: Marketplace): MarketplaceConfig {.contract, view.} +proc token*(marketplace: Marketplace): Address {.contract, view.} +proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.} +proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.} +proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.} + +proc requestStorage*(marketplace: Marketplace, request: StorageRequest): ?TransactionResponse {.contract.} +proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): ?TransactionResponse {.contract.} +proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): ?TransactionResponse {.contract.} +proc freeSlot*(marketplace: Marketplace, id: SlotId): ?TransactionResponse {.contract.} +proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.} +proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.} +proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.} + +proc 
myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.} +proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.} +proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.} +proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.} +proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.} +proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.} + +proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.} + +proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} +proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} +proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} +proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} +proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.} +proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.} + +proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): ?TransactionResponse {.contract.} +proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): ?TransactionResponse {.contract.} diff --git a/codex/contracts/proofs.nim b/codex/contracts/proofs.nim index f67073a1..a7a59351 100644 --- a/codex/contracts/proofs.nim +++ b/codex/contracts/proofs.nim @@ -1,68 +1,43 @@ -import std/strutils -import pkg/ethers -import pkg/ethers/testing -import ../storageproofs/timing/proofs -import ./storage - -export proofs +import pkg/stint +import pkg/contractabi +import pkg/ethers/fields type - OnChainProofs* = ref object of Proofs - storage: Storage - pollInterval*: Duration - ProofsSubscription = proofs.Subscription - EventSubscription = ethers.Subscription - OnChainProofsSubscription = ref object of 
ProofsSubscription - eventSubscription: EventSubscription + Groth16Proof* = object + a*: G1Point + b*: G2Point + c*: G1Point + G1Point* = object + x*: UInt256 + y*: UInt256 + # A field element F_{p^2} encoded as `real + i * imag` + Fp2Element* = object + real*: UInt256 + imag*: UInt256 + G2Point* = object + x*: Fp2Element + y*: Fp2Element -const DefaultPollInterval = 3.seconds +func solidityType*(_: type G1Point): string = + solidityType(G1Point.fieldTypes) -proc new*(_: type OnChainProofs, storage: Storage): OnChainProofs = - OnChainProofs(storage: storage, pollInterval: DefaultPollInterval) +func solidityType*(_: type Fp2Element): string = + solidityType(Fp2Element.fieldTypes) -method periodicity*(proofs: OnChainProofs): Future[Periodicity] {.async.} = - let period = await proofs.storage.proofPeriod() - return Periodicity(seconds: period) +func solidityType*(_: type G2Point): string = + solidityType(G2Point.fieldTypes) -method isProofRequired*(proofs: OnChainProofs, - id: SlotId): Future[bool] {.async.} = - try: - return await proofs.storage.isProofRequired(id) - except ProviderError as e: - if e.revertReason.contains("Slot empty"): - return false - raise e +func solidityType*(_: type Groth16Proof): string = + solidityType(Groth16Proof.fieldTypes) -method willProofBeRequired*(proofs: OnChainProofs, - id: SlotId): Future[bool] {.async.} = - try: - return await proofs.storage.willProofBeRequired(id) - except ProviderError as e: - if e.revertReason.contains("Slot empty"): - return false - raise e +func encode*(encoder: var AbiEncoder, point: G1Point) = + encoder.write(point.fieldValues) -method getProofEnd*(proofs: OnChainProofs, - id: SlotId): Future[UInt256] {.async.} = - try: - return await proofs.storage.proofEnd(id) - except ProviderError as e: - if e.revertReason.contains("Slot empty"): - return 0.u256 - raise e +func encode*(encoder: var AbiEncoder, element: Fp2Element) = + encoder.write(element.fieldValues) -method submitProof*(proofs: OnChainProofs, - id: 
SlotId, - proof: seq[byte]) {.async.} = - await proofs.storage.submitProof(id, proof) +func encode*(encoder: var AbiEncoder, point: G2Point) = + encoder.write(point.fieldValues) -method subscribeProofSubmission*(proofs: OnChainProofs, - callback: OnProofSubmitted): - Future[ProofsSubscription] {.async.} = - proc onEvent(event: ProofSubmitted) {.upraises: [].} = - callback(event.id, event.proof) - let subscription = await proofs.storage.subscribe(ProofSubmitted, onEvent) - return OnChainProofsSubscription(eventSubscription: subscription) - -method unsubscribe*(subscription: OnChainProofsSubscription) {.async, upraises:[].} = - await subscription.eventSubscription.unsubscribe() +func encode*(encoder: var AbiEncoder, proof: Groth16Proof) = + encoder.write(proof.fieldValues) diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 15d7b5c4..1363fb9d 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -1,35 +1,38 @@ import std/hashes +import std/sequtils +import std/typetraits import pkg/contractabi import pkg/nimcrypto import pkg/ethers/fields import pkg/questionable/results +import pkg/stew/byteutils +import pkg/upraises +import ../logutils +import ../utils/json export contractabi type StorageRequest* = object - client*: Address - ask*: StorageAsk - content*: StorageContent - expiry*: UInt256 + client* {.serialize.}: Address + ask* {.serialize.}: StorageAsk + content* {.serialize.}: StorageContent + expiry* {.serialize.}: UInt256 nonce*: Nonce StorageAsk* = object - slots*: uint64 - slotSize*: UInt256 - duration*: UInt256 - proofProbability*: UInt256 - reward*: UInt256 - maxSlotLoss*: uint64 + slots* {.serialize.}: uint64 + slotSize* {.serialize.}: UInt256 + duration* {.serialize.}: UInt256 + proofProbability* {.serialize.}: UInt256 + reward* {.serialize.}: UInt256 + collateral* {.serialize.}: UInt256 + maxSlotLoss* {.serialize.}: uint64 StorageContent* = object - cid*: string - erasure*: StorageErasure - por*: 
StoragePoR - StorageErasure* = object - totalChunks*: uint64 - StoragePoR* = object - u*: seq[byte] - publicKey*: seq[byte] - name*: seq[byte] + cid* {.serialize.}: string + merkleRoot*: array[32, byte] + Slot* = object + request* {.serialize.}: StorageRequest + slotIndex* {.serialize.}: UInt256 SlotId* = distinct array[32, byte] RequestId* = distinct array[32, byte] Nonce* = distinct array[32, byte] @@ -39,11 +42,20 @@ type Cancelled Finished Failed + SlotState* {.pure.} = enum + Free + Filled + Finished + Failed + Paid + Cancelled proc `==`*(x, y: Nonce): bool {.borrow.} proc `==`*(x, y: RequestId): bool {.borrow.} proc `==`*(x, y: SlotId): bool {.borrow.} proc hash*(x: SlotId): Hash {.borrow.} +proc hash*(x: Nonce): Hash {.borrow.} +proc hash*(x: Address): Hash {.borrow.} func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] = array[32, byte](id) @@ -51,6 +63,30 @@ func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] = proc `$`*(id: RequestId | SlotId | Nonce): string = id.toArray.toHex +proc fromHex*(T: type RequestId, hex: string): T = + T array[32, byte].fromHex(hex) + +proc fromHex*(T: type SlotId, hex: string): T = + T array[32, byte].fromHex(hex) + +proc fromHex*(T: type Nonce, hex: string): T = + T array[32, byte].fromHex(hex) + +proc fromHex*[T: distinct](_: type T, hex: string): T = + type baseType = T.distinctBase + T baseType.fromHex(hex) + +proc toHex*[T: distinct](id: T): string = + type baseType = T.distinctBase + baseType(id).toHex + +logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog +logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog +logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog +logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog +logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog +logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog + func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest = StorageRequest( client: tupl[0], @@ -60,6 +96,12 @@ func 
fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest = nonce: tupl[4] ) +func fromTuple(_: type Slot, tupl: tuple): Slot = + Slot( + request: tupl[0], + slotIndex: tupl[1] + ) + func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = StorageAsk( slots: tupl[0], @@ -67,34 +109,16 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = duration: tupl[2], proofProbability: tupl[3], reward: tupl[4], - maxSlotLoss: tupl[5] + collateral: tupl[5], + maxSlotLoss: tupl[6] ) func fromTuple(_: type StorageContent, tupl: tuple): StorageContent = StorageContent( cid: tupl[0], - erasure: tupl[1], - por: tupl[2] + merkleRoot: tupl[1] ) -func fromTuple(_: type StorageErasure, tupl: tuple): StorageErasure = - StorageErasure( - totalChunks: tupl[0] - ) - -func fromTuple(_: type StoragePoR, tupl: tuple): StoragePoR = - StoragePoR( - u: tupl[0], - publicKey: tupl[1], - name: tupl[2] - ) - -func solidityType*(_: type StoragePoR): string = - solidityType(StoragePoR.fieldTypes) - -func solidityType*(_: type StorageErasure): string = - solidityType(StorageErasure.fieldTypes) - func solidityType*(_: type StorageContent): string = solidityType(StorageContent.fieldTypes) @@ -104,15 +128,6 @@ func solidityType*(_: type StorageAsk): string = func solidityType*(_: type StorageRequest): string = solidityType(StorageRequest.fieldTypes) -func solidityType*[T: RequestId | SlotId | Nonce](_: type T): string = - solidityType(array[32, byte]) - -func encode*(encoder: var AbiEncoder, por: StoragePoR) = - encoder.write(por.fieldValues) - -func encode*(encoder: var AbiEncoder, erasure: StorageErasure) = - encoder.write(erasure.fieldValues) - func encode*(encoder: var AbiEncoder, content: StorageContent) = encoder.write(content.fieldValues) @@ -125,18 +140,8 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) = func encode*(encoder: var AbiEncoder, request: StorageRequest) = encoder.write(request.fieldValues) -func decode*[T: RequestId | SlotId | 
Nonce](decoder: var AbiDecoder, - _: type T): ?!T = - let nonce = ?decoder.read(type array[32, byte]) - success T(nonce) - -func decode*(decoder: var AbiDecoder, T: type StoragePoR): ?!T = - let tupl = ?decoder.read(StoragePoR.fieldTypes) - success StoragePoR.fromTuple(tupl) - -func decode*(decoder: var AbiDecoder, T: type StorageErasure): ?!T = - let tupl = ?decoder.read(StorageErasure.fieldTypes) - success StorageErasure.fromTuple(tupl) +func encode*(encoder: var AbiEncoder, request: Slot) = + encoder.write(request.fieldValues) func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T = let tupl = ?decoder.read(StorageContent.fieldTypes) @@ -150,6 +155,10 @@ func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T = let tupl = ?decoder.read(StorageRequest.fieldTypes) success StorageRequest.fromTuple(tupl) +func decode*(decoder: var AbiDecoder, T: type Slot): ?!T = + let tupl = ?decoder.read(Slot.fieldTypes) + success Slot.fromTuple(tupl) + func id*(request: StorageRequest): RequestId = let encoding = AbiEncoder.encode((request, )) RequestId(keccak256.digest(encoding).data) @@ -161,6 +170,9 @@ func slotId*(requestId: RequestId, slot: UInt256): SlotId = func slotId*(request: StorageRequest, slot: UInt256): SlotId = slotId(request.id, slot) +func id*(slot: Slot): SlotId = + slotId(slot.request, slot.slotIndex) + func pricePerSlot*(ask: StorageAsk): UInt256 = ask.duration * ask.reward diff --git a/codex/contracts/storage.nim b/codex/contracts/storage.nim deleted file mode 100644 index b83a4988..00000000 --- a/codex/contracts/storage.nim +++ /dev/null @@ -1,62 +0,0 @@ -import pkg/ethers -import pkg/json_rpc/rpcclient -import pkg/stint -import pkg/chronos -import ../clock -import ./requests - -export stint -export ethers - -type - Storage* = ref object of Contract - StorageRequested* = object of Event - requestId*: RequestId - ask*: StorageAsk - SlotFilled* = object of Event - requestId* {.indexed.}: RequestId - slotIndex* {.indexed.}: UInt256 - 
slotId*: SlotId - RequestFulfilled* = object of Event - requestId* {.indexed.}: RequestId - RequestCancelled* = object of Event - requestId* {.indexed.}: RequestId - RequestFailed* = object of Event - requestId* {.indexed.}: RequestId - ProofSubmitted* = object of Event - id*: SlotId - proof*: seq[byte] - - -proc collateralAmount*(storage: Storage): UInt256 {.contract, view.} -proc slashMisses*(storage: Storage): UInt256 {.contract, view.} -proc slashPercentage*(storage: Storage): UInt256 {.contract, view.} -proc minCollateralThreshold*(storage: Storage): UInt256 {.contract, view.} - -proc deposit*(storage: Storage, amount: UInt256) {.contract.} -proc withdraw*(storage: Storage) {.contract.} -proc balanceOf*(storage: Storage, account: Address): UInt256 {.contract, view.} - -proc requestStorage*(storage: Storage, request: StorageRequest) {.contract.} -proc fillSlot*(storage: Storage, requestId: RequestId, slotIndex: UInt256, proof: seq[byte]) {.contract.} -proc withdrawFunds*(storage: Storage, requestId: RequestId) {.contract.} -proc payoutSlot*(storage: Storage, requestId: RequestId, slotIndex: UInt256) {.contract.} -proc getRequest*(storage: Storage, id: RequestId): StorageRequest {.contract, view.} -proc getHost*(storage: Storage, id: SlotId): Address {.contract, view.} - -proc myRequests*(storage: Storage): seq[RequestId] {.contract, view.} -proc state*(storage: Storage, requestId: RequestId): RequestState {.contract, view.} -proc requestEnd*(storage: Storage, requestId: RequestId): SecondsSince1970 {.contract, view.} - -proc proofPeriod*(storage: Storage): UInt256 {.contract, view.} -proc proofTimeout*(storage: Storage): UInt256 {.contract, view.} - -proc proofEnd*(storage: Storage, id: SlotId): UInt256 {.contract, view.} -proc missingProofs*(storage: Storage, id: SlotId): UInt256 {.contract, view.} -proc isProofRequired*(storage: Storage, id: SlotId): bool {.contract, view.} -proc willProofBeRequired*(storage: Storage, id: SlotId): bool {.contract, view.} 
-proc getChallenge*(storage: Storage, id: SlotId): array[32, byte] {.contract, view.} -proc getPointer*(storage: Storage, id: SlotId): uint8 {.contract, view.} - -proc submitProof*(storage: Storage, id: SlotId, proof: seq[byte]) {.contract.} -proc markProofAsMissing*(storage: Storage, id: SlotId, period: UInt256) {.contract.} diff --git a/codex/contracts/testtoken.nim b/codex/contracts/testtoken.nim deleted file mode 100644 index 175d82c2..00000000 --- a/codex/contracts/testtoken.nim +++ /dev/null @@ -1,10 +0,0 @@ -import pkg/chronos -import pkg/stint -import pkg/ethers - -type - TestToken* = ref object of Contract - -proc mint*(token: TestToken, holder: Address, amount: UInt256) {.contract.} -proc approve*(token: TestToken, spender: Address, amount: UInt256) {.contract.} -proc balanceOf*(token: TestToken, account: Address): UInt256 {.contract, view.} diff --git a/codex/discovery.nim b/codex/discovery.nim index 37b92bd2..47ac950d 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -8,21 +8,19 @@ ## those terms. 
import std/algorithm +import std/sequtils import pkg/chronos -import pkg/chronicles -import pkg/libp2p -import pkg/libp2p/routing_record -import pkg/libp2p/signed_envelope +import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope] import pkg/questionable import pkg/questionable/results import pkg/stew/shims/net import pkg/contractabi/address as ca -import pkg/libp2pdht/discv5/protocol as discv5 +import pkg/codexdht/discv5/[routing_table, protocol as discv5] import ./rng import ./errors -import ./formats +import ./logutils export discv5 @@ -35,10 +33,10 @@ logScope: type Discovery* = ref object of RootObj - protocol: discv5.Protocol # dht protocol + protocol*: discv5.Protocol # dht protocol key: PrivateKey # private key peerId: PeerId # the peer id of the local node - announceAddrs: seq[MultiAddress] # addresses announced as part of the provider records + announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records providerRecord*: ?SignedPeerRecord # record to advertice node connection information, this carry any # address that the node can be connected on dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information @@ -57,7 +55,10 @@ proc toNodeId*(host: ca.Address): NodeId = proc findPeer*( d: Discovery, - peerId: PeerID): Future[?PeerRecord] {.async.} = + peerId: PeerId): Future[?PeerRecord] {.async.} = + trace "protocol.resolve..." + ## Find peer using the given Discovery object + ## let node = await d.protocol.resolve(toNodeId(peerId)) @@ -72,27 +73,22 @@ method find*( cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = ## Find block providers ## - - trace "Finding providers for block", cid without providers =? 
(await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: - trace "Error finding providers for block", cid, error = error.msg + warn "Error finding providers for block", cid, error = error.msg - return providers + return providers.filterIt( not (it.data.peerId == d.peerId) ) method provide*(d: Discovery, cid: Cid) {.async, base.} = - ## Provide a bock Cid + ## Provide a block Cid ## - - trace "Providing block", cid let nodes = await d.protocol.addProvider( cid.toNodeId(), d.providerRecord.get) if nodes.len <= 0: - trace "Couldn't provide to any nodes!" + warn "Couldn't provide to any nodes!" - trace "Provided to nodes", nodes = nodes.len method find*( d: Discovery, @@ -126,7 +122,9 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} = if nodes.len > 0: trace "Provided to nodes", nodes = nodes.len -method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base.} = +method removeProvider*( + d: Discovery, + peerId: PeerId): Future[void] {.base.} = ## Remove provider from providers table ## @@ -160,6 +158,10 @@ proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) = IpTransportProtocol.udpProtocol, port)])).expect("Should construct signed record").some + if not d.protocol.isNil: + d.protocol.updateRecord(d.dhtRecord) + .expect("Should update SPR") + proc start*(d: Discovery) {.async.} = d.protocol.open() await d.protocol.start() @@ -168,22 +170,36 @@ proc stop*(d: Discovery) {.async.} = await d.protocol.closeWait() proc new*( - T: type Discovery, - key: PrivateKey, - bindIp = ValidIpAddress.init(IPv4_any()), - bindPort = 0.Port, - announceAddrs: openArray[MultiAddress], - bootstrapNodes: openArray[SignedPeerRecord] = [], - store: Datastore = SQLiteDatastore.new(Memory) - .expect("Should not fail!")): T = + T: type Discovery, + key: PrivateKey, + bindIp = ValidIpAddress.init(IPv4_any()), + bindPort = 0.Port, + announceAddrs: openArray[MultiAddress], + bootstrapNodes: openArray[SignedPeerRecord] = [], + store: Datastore = 
SQLiteDatastore.new(Memory).expect("Should not fail!") +): Discovery = + ## Create a new Discovery node instance for the given key and datastore + ## var - self = T( + self = Discovery( key: key, peerId: PeerId.init(key).expect("Should construct PeerId")) self.updateAnnounceRecord(announceAddrs) + # -------------------------------------------------------------------------- + # FIXME disable IP limits temporarily so we can run our workshop. Re-enable + # and figure out proper solution. + let discoveryConfig = DiscoveryConfig( + tableIpLimits: TableIpLimits( + tableIpLimit: high(uint), + bucketIpLimit:high(uint) + ), + bitsPerHop: DefaultBitsPerHop + ) + # -------------------------------------------------------------------------- + self.protocol = newProtocol( key, bindIp = bindIp.toNormalIp, @@ -191,6 +207,7 @@ proc new*( record = self.providerRecord.get, bootstrapRecords = bootstrapNodes, rng = Rng.instance(), - providers = ProvidersManager.new(store)) + providers = ProvidersManager.new(store), + config = discoveryConfig) self diff --git a/codex/erasure.nim b/codex/erasure.nim index b14d8e52..5dfebcd4 100644 --- a/codex/erasure.nim +++ b/codex/erasure.nim @@ -12,8 +12,14 @@ import ./erasure/backends/leopard export erasure -func leoEncoderProvider*(size, buffers, parity: int): EncoderBackend {.raises: [Defect].} = +func leoEncoderProvider*( + size, buffers, parity: int +): EncoderBackend {.raises: [Defect].} = + ## create new Leo Encoder LeoEncoderBackend.new(size, buffers, parity) -func leoDecoderProvider*(size, buffers, parity: int): DecoderBackend {.raises: [Defect].} = - LeoDecoderBackend.new(size, buffers, parity) +func leoDecoderProvider*( + size, buffers, parity: int +): DecoderBackend {.raises: [Defect].} = + ## create new Leo Decoder + LeoDecoderBackend.new(size, buffers, parity) diff --git a/codex/erasure/asyncbackend.nim b/codex/erasure/asyncbackend.nim new file mode 100644 index 00000000..4827806a --- /dev/null +++ b/codex/erasure/asyncbackend.nim @@ 
-0,0 +1,225 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import std/sequtils + +import pkg/taskpools +import pkg/taskpools/flowvars +import pkg/chronos +import pkg/chronos/threadsync +import pkg/questionable/results + +import ./backend +import ../errors +import ../logutils + +logScope: + topics = "codex asyncerasure" + +const + CompletitionTimeout = 1.seconds # Maximum await time for completition after receiving a signal + CompletitionRetryDelay = 10.millis + +type + EncoderBackendPtr = ptr EncoderBackend + DecoderBackendPtr = ptr DecoderBackend + + # Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy + EncodeTaskArgs = object + signal: ThreadSignalPtr + backend: EncoderBackendPtr + blockSize: int + ecM: int + + DecodeTaskArgs = object + signal: ThreadSignalPtr + backend: DecoderBackendPtr + blockSize: int + ecK: int + + SharedArrayHolder*[T] = object + data: ptr UncheckedArray[T] + size: int + + EncodeTaskResult = Result[SharedArrayHolder[byte], cstring] + DecodeTaskResult = Result[SharedArrayHolder[byte], cstring] + +proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult = + var + data = data.unsafeAddr + parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize)) + + try: + let res = args.backend[].encode(data[], parity) + + if res.isOk: + let + resDataSize = parity.len * args.blockSize + resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize)) + arrHolder = SharedArrayHolder[byte]( + data: resData, + size: resDataSize + ) + + for i in 0.. 
+ self.store.getBlock( + BlockAddress.init(manifest.treeCid, i) + ).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K) + ) + + proc isFinished(): bool = pendingBlocks.len == 0 + + proc genNext(): Future[(?!bt.Block, int)] {.async.} = + let completedFut = await one(pendingBlocks) + if (let i = pendingBlocks.find(completedFut); i >= 0): + pendingBlocks.del(i) + return await completedFut + else: + let (_, index) = await completedFut + raise newException( + CatchableError, + "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index) + + AsyncIter[(?!bt.Block, int)].new(genNext, isFinished) + +proc prepareEncodingData( + self: Erasure, + manifest: Manifest, + params: EncodingParams, + step: Natural, + data: ref seq[seq[byte]], + cids: ref seq[Cid], + emptyBlock: seq[byte]): Future[?!Natural] {.async.} = + ## Prepare data for encoding + ## + + let + strategy = params.strategy.init( + firstIndex = 0, + lastIndex = params.rounded - 1, + iterations = params.steps + ) + indicies = toSeq(strategy.getIndicies(step)) + pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount)) + + var resolved = 0 + for fut in pendingBlocksIter: + let (blkOrErr, idx) = await fut + without blk =? blkOrErr, err: + warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg + continue + + let pos = indexToPos(params.steps, idx, step) + shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data) + cids[idx] = blk.cid + + resolved.inc() + + for idx in indicies.filterIt(it >= manifest.blocksCount): + let pos = indexToPos(params.steps, idx, step) + trace "Padding with empty block", idx + shallowCopy(data[pos], emptyBlock) + without emptyBlockCid =? 
emptyCid(manifest.version, manifest.hcodec, manifest.codec), err: + return failure(err) + cids[idx] = emptyBlockCid + + success(resolved.Natural) + +proc prepareDecodingData( + self: Erasure, + encoded: Manifest, + step: Natural, + data: ref seq[seq[byte]], + parityData: ref seq[seq[byte]], + cids: ref seq[Cid], + emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} = + ## Prepare data for decoding + ## `encoded` - the encoded manifest + ## `step` - the current step + ## `data` - the data to be prepared + ## `parityData` - the parityData to be prepared + ## `cids` - cids of prepared data + ## `emptyBlock` - the empty block to be used for padding + ## + + let + strategy = encoded.protectedStrategy.init( + firstIndex = 0, + lastIndex = encoded.blocksCount - 1, + iterations = encoded.steps + ) + indicies = toSeq(strategy.getIndicies(step)) + pendingBlocksIter = self.getPendingBlocks(encoded, indicies) + + var + dataPieces = 0 + parityPieces = 0 + resolved = 0 + for fut in pendingBlocksIter: + # Continue to receive blocks until we have just enough for decoding + # or no more blocks can arrive + if resolved >= encoded.ecK: + break + + let (blkOrErr, idx) = await fut + without blk =? 
blkOrErr, err: + trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg + continue + + let + pos = indexToPos(encoded.steps, idx, step) + + logScope: + cid = blk.cid + idx = idx + pos = pos + step = step + empty = blk.isEmpty + + cids[idx] = blk.cid + if idx >= encoded.rounded: + trace "Retrieved parity block" + shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data) + parityPieces.inc + else: + trace "Retrieved data block" + shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data) + dataPieces.inc + + resolved.inc + + return success (dataPieces.Natural, parityPieces.Natural) + +proc init*( + _: type EncodingParams, + manifest: Manifest, + ecK: Natural, ecM: Natural, + strategy: StrategyType): ?!EncodingParams = + if ecK > manifest.blocksCount: + let exc = (ref InsufficientBlocksError)( + msg: "Unable to encode manifest, not enough blocks, ecK = " & + $ecK & + ", blocksCount = " & + $manifest.blocksCount, + minSize: ecK.NBytes * manifest.blockSize) + return failure(exc) + + let + rounded = roundUp(manifest.blocksCount, ecK) + steps = divUp(rounded, ecK) + blocksCount = rounded + (steps * ecM) + + success EncodingParams( + ecK: ecK, + ecM: ecM, + rounded: rounded, + steps: steps, + blocksCount: blocksCount, + strategy: strategy + ) + +proc encodeData( + self: Erasure, + manifest: Manifest, + params: EncodingParams + ): Future[?!Manifest] {.async.} = + ## Encode blocks pointed to by the protected manifest + ## + ## `manifest` - the manifest to encode + ## + + logScope: + steps = params.steps + rounded_blocks = params.rounded + blocks_count = params.blocksCount + ecK = params.ecK + ecM = params.ecM + + var + cids = seq[Cid].new() + encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM) + emptyBlock = newSeq[byte](manifest.blockSize.int) + + cids[].setLen(params.blocksCount) try: - for i in 0..= encoded.K) or (idxPendingBlocks.len == 0): - break + data[].setLen(encoded.ecK) # set 
len to K + parity[].setLen(encoded.ecM) # set len to M - let - done = await one(idxPendingBlocks) - idx = pendingBlocks.find(done) + without (dataPieces, _) =? + (await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err: + trace "Unable to prepare data", error = err.msg + return failure(err) - idxPendingBlocks.del(idxPendingBlocks.find(done)) - - without blk =? (await done), error: - trace "Failed retrieving block", error = error.msg - continue - - if idx >= encoded.K: - trace "Retrieved parity block", cid = blk.cid, idx - shallowCopy(parityData[idx - encoded.K], if blk.isEmpty: emptyBlock else: blk.data) - else: - trace "Retrieved data block", cid = blk.cid, idx - shallowCopy(data[idx], if blk.isEmpty: emptyBlock else: blk.data) - - resolved.inc - - let - dataPieces = data.filterIt( it.len > 0 ).len - parityPieces = parityData.filterIt( it.len > 0 ).len - - if dataPieces >= encoded.K: - trace "Retrieved all the required data blocks", data = dataPieces, parity = parityPieces + if dataPieces >= encoded.ecK: + trace "Retrieved all the required data blocks" continue - trace "Erasure decoding data", data = dataPieces, parity = parityPieces - if ( - let err = decoder.decode(data, parityData, recovered); - err.isErr): - trace "Unable to decode manifest!", err = $err.error - return failure($err.error) + trace "Erasure decoding data" - for i in 0.. i < tree.leavesCount) + + if err =? 
(await self.store.putSomeProofs(tree, idxIter)).errorOption: + return failure(err) + + let decoded = Manifest.new(encoded) return decoded.success @@ -269,9 +470,13 @@ proc new*( T: type Erasure, store: BlockStore, encoderProvider: EncoderProvider, - decoderProvider: DecoderProvider): Erasure = + decoderProvider: DecoderProvider, + taskpool: Taskpool): Erasure = + ## Create a new Erasure instance for encoding and decoding manifests + ## Erasure( store: store, encoderProvider: encoderProvider, - decoderProvider: decoderProvider) + decoderProvider: decoderProvider, + taskpool: taskpool) diff --git a/codex/errors.nim b/codex/errors.nim index b3777f2a..9947d4b7 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -7,16 +7,43 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +import std/options + import pkg/stew/results +import pkg/chronos +import pkg/questionable/results + +export results type CodexError* = object of CatchableError # base codex error CodexResult*[T] = Result[T, ref CodexError] -template mapFailure*( - exp: untyped, - exc: typed = type CodexError): untyped = +template mapFailure*[T, V, E]( + exp: Result[T, V], + exc: typedesc[E], +): Result[T, ref CatchableError] = ## Convert `Result[T, E]` to `Result[E, ref CatchableError]` ## - ((exp.mapErr do (e: auto) -> ref CatchableError: (ref exc)(msg: $e))) + exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e)) + +template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] = + mapFailure(exp, CodexError) + +# TODO: using a template here, causes bad codegen +func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} = + if exp.isSome: + success exp.get + else: + T.failure("Option is None") + +proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} = + try: + await allFuturesThrowing(fut) + except CancelledError as exc: + raise exc + except CatchableError as exc: + return failure(exc.msg) + + return 
success() diff --git a/codex/formats.nim b/codex/formats.nim deleted file mode 100644 index ec79dabe..00000000 --- a/codex/formats.nim +++ /dev/null @@ -1,28 +0,0 @@ -## Nim-Codex -## Copyright (c) 2022 Status Research & Development GmbH -## Licensed under either of -## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -## * MIT license ([LICENSE-MIT](LICENSE-MIT)) -## at your option. -## This file may not be copied, modified, or distributed except according to -## those terms. - -import std/strutils - -import pkg/chronicles -import pkg/libp2p - -func shortLog*(cid: Cid): string = - ## Returns compact string representation of ``pid``. - var scid = $cid - if len(scid) > 10: - scid[3] = '*' - - when (NimMajor, NimMinor) > (1, 4): - scid.delete(4 .. scid.high - 6) - else: - scid.delete(4, scid.high - 6) - - scid - -chronicles.formatIt(Cid): shortLog(it) diff --git a/codex/indexingstrategy.nim b/codex/indexingstrategy.nim new file mode 100644 index 00000000..d8eeba58 --- /dev/null +++ b/codex/indexingstrategy.nim @@ -0,0 +1,97 @@ +import ./errors +import ./utils +import ./utils/asynciter + +{.push raises: [].} + +type + StrategyType* = enum + # Simplest approach: + # 0 => 0, 1, 2 + # 1 => 3, 4, 5 + # 2 => 6, 7, 8 + LinearStrategy, + + # Stepped indexing: + # 0 => 0, 3, 6 + # 1 => 1, 4, 7 + # 2 => 2, 5, 8 + SteppedStrategy + + # Representing a strategy for grouping indices (of blocks usually) + # Given an interation-count as input, will produce a seq of + # selected indices. 
+ + IndexingError* = object of CodexError + IndexingWrongIndexError* = object of IndexingError + IndexingWrongIterationsError* = object of IndexingError + + IndexingStrategy* = object + strategyType*: StrategyType + firstIndex*: int # Lowest index that can be returned + lastIndex*: int # Highest index that can be returned + iterations*: int # getIndices(iteration) will run from 0 ..< iterations + step*: int + +func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} = + if iteration >= self.iterations: + raise newException( + IndexingError, + "Indexing iteration can't be greater than or equal to iterations.") + +func getIter(first, last, step: int): Iter[int] = + {.cast(noSideEffect).}: + Iter[int].new(first, last, step) + +func getLinearIndicies( + self: IndexingStrategy, + iteration: int): Iter[int] {.raises: [IndexingError].} = + self.checkIteration(iteration) + + let + first = self.firstIndex + iteration * self.step + last = min(first + self.step - 1, self.lastIndex) + + getIter(first, last, 1) + +func getSteppedIndicies( + self: IndexingStrategy, + iteration: int): Iter[int] {.raises: [IndexingError].} = + self.checkIteration(iteration) + + let + first = self.firstIndex + iteration + last = self.lastIndex + + getIter(first, last, self.iterations) + +func getIndicies*( + self: IndexingStrategy, + iteration: int): Iter[int] {.raises: [IndexingError].} = + + case self.strategyType + of StrategyType.LinearStrategy: + self.getLinearIndicies(iteration) + of StrategyType.SteppedStrategy: + self.getSteppedIndicies(iteration) + +func init*( + strategy: StrategyType, + firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} = + + if firstIndex > lastIndex: + raise newException( + IndexingWrongIndexError, + "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")") + + if iterations <= 0: + raise newException( + IndexingWrongIterationsError, + "iterations (" & $iterations & 
") must be greater than zero.") + + IndexingStrategy( + strategyType: strategy, + firstIndex: firstIndex, + lastIndex: lastIndex, + iterations: iterations, + step: divUp((lastIndex - firstIndex + 1), iterations)) diff --git a/codex/logutils.nim b/codex/logutils.nim new file mode 100644 index 00000000..eb75e906 --- /dev/null +++ b/codex/logutils.nim @@ -0,0 +1,242 @@ +## logutils is a module that has several goals: +## 1. Fix json logging output (run with `--log-format=json`) which was +## effectively broken for many types using default Chronicles json +## serialization. +## 2. Ability to specify log output for textlines and json sinks together or +## separately +## - This is useful if consuming json in some kind of log parser and need +## valid json with real values +## - eg a shortened Cid is nice to see in a text log in stdout, but won't +## provide a real Cid when parsed in json +## 4. Remove usages of `nim-json-serialization` from the codebase +## 5. Remove need to declare `writeValue` for new types +## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent +## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467) +## +## When declaring a new type, one should consider importing the `codex/logutils` +## module, and specifying `formatIt`. If textlines log output and json log output +## need to be different, overload `formatIt` and specify a `LogFormat`. If json +## serialization is needed, it can be declared with a `%` proc. `logutils` +## imports and exports `nim-serde` which handles the de/serialization, examples +## below. 
**Only `codex/logutils` needs to be imported.** +## +## Using `logutils` in the Codex codebase: +## - Instead of importing `pkg/chronicles`, import `pkg/codex/logutils` +## - most of `chronicles` is exported by `logutils` +## - Instead of importing `std/json`, import `pkg/serde/json` +## - `std/json` is exported by `serde` which is exported by `logutils` +## - Instead of importing `pkg/nim-json-serialization`, import +## `pkg/serde/json` or use codex-specific overloads by importing `utils/json` +## - one of the goals is to remove the use of `nim-json-serialization` +## +## ```nim +## import pkg/codex/logutils +## +## type +## BlockAddress* = object +## case leaf*: bool +## of true: +## treeCid* {.serialize.}: Cid +## index* {.serialize.}: Natural +## else: +## cid* {.serialize.}: Cid +## +## logutils.formatIt(LogFormat.textLines, BlockAddress): +## if it.leaf: +## "treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index +## else: +## "cid: " & shortLog($it.cid) +## +## logutils.formatIt(LogFormat.json, BlockAddress): %it +## +## # chronicles textlines output +## TRC test tid=14397405 ba="treeCid: zb2*fndjU1, index: 0" +## # chronicles json output +## {"lvl":"TRC","msg":"test","tid":14397405,"ba":{"treeCid":"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1","index":0}} +## ``` +## In this case, `BlockAddress` is just an object, so `nim-serde` can handle +## serializing it without issue (only fields annotated with `{.serialize.}` will +## serialize (aka opt-in serialization)). 
+## +## If one so wished, another option for the textlines log output, would be to +## simply `toString` the serialised json: +## ```nim +## logutils.formatIt(LogFormat.textLines, BlockAddress): $ %it +## # or, more succinctly: +## logutils.formatIt(LogFormat.textLines, BlockAddress): it.toJson +## ``` +## In that case, both the textlines and json sinks would have the same output, +## so we could reduce this even further by not specifying a `LogFormat`: +## ```nim +## type +## BlockAddress* = object +## case leaf*: bool +## of true: +## treeCid* {.serialize.}: Cid +## index* {.serialize.}: Natural +## else: +## cid* {.serialize.}: Cid +## +## logutils.formatIt(BlockAddress): %it +## +## # chronicles textlines output +## TRC test tid=14400673 ba="{\"treeCid\":\"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1\",\"index\":0}" +## # chronicles json output +## {"lvl":"TRC","msg":"test","tid":14400673,"ba":{"treeCid":"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1","index":0}} +## ``` + +import std/options +import std/sequtils +import std/strutils +import std/sugar +import std/typetraits + +import pkg/chronicles except toJson, `%` +from pkg/libp2p import Cid, MultiAddress, `$` +import pkg/questionable +import pkg/questionable/results +import ./utils/json except formatIt # TODO: remove exception? +import pkg/stew/byteutils +import pkg/stint +import pkg/upraises + +export byteutils +export chronicles except toJson, formatIt, `%` +export questionable +export sequtils +export json except formatIt +export strutils +export sugar +export upraises +export results + +func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string = + ## Returns compact string representation of ``long``. + var short = long + let minLen = start + ellipses.len + stop + if len(short) > minLen: + short.insert(ellipses, start) + + when (NimMajor, NimMinor) > (1, 4): + short.delete(start + ellipses.len .. 
short.high - stop) + else: + short.delete(start + ellipses.len, short.high - stop) + + short + +func shortHexLog*(long: string): string = + if long[0..1] == "0x": result &= "0x" + result &= long[2..long.high].shortLog("..", 4, 4) + +func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string = + v.to0xHex.shortHexLog + +func short0xHexLog*[T: distinct](v: T): string = + type BaseType = T.distinctBase + BaseType(v).short0xHexLog + +func short0xHexLog*[U: distinct, T: seq[U]](v: T): string = + type BaseType = U.distinctBase + "@[" & v.map(x => BaseType(x).short0xHexLog).join(",") & "]" + +func to0xHexLog*[T: distinct](v: T): string = + type BaseType = T.distinctBase + BaseType(v).to0xHex + +func to0xHexLog*[U: distinct, T: seq[U]](v: T): string = + type BaseType = U.distinctBase + "@[" & v.map(x => BaseType(x).to0xHex).join(",") & "]" + +proc formatTextLineSeq*(val: seq[string]): string = + "@[" & val.join(", ") & "]" + +template formatIt*(format: LogFormat, T: typedesc, body: untyped) = + # Provides formatters for logging with Chronicles for the given type and + # `LogFormat`. + # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden + # since the base `setProperty` is generic using `auto` and conflicts with + # providing a generic `seq` and `Option` override. + when format == LogFormat.json: + proc formatJsonOption(val: ?T): JsonNode = + if it =? val: + json.`%`(body) + else: + newJNull() + + proc formatJsonResult*(val: ?!T): JsonNode = + without it =? 
val, error: + let jObj = newJObject() + jObj["error"] = newJString(error.msg) + return jObj + json.`%`(body) + + proc setProperty*(r: var JsonRecord, key: string, res: ?!T) = + var it {.inject, used.}: T + setProperty(r, key, res.formatJsonResult) + + proc setProperty*(r: var JsonRecord, key: string, opt: ?T) = + var it {.inject, used.}: T + let v = opt.formatJsonOption + setProperty(r, key, v) + + proc setProperty*(r: var JsonRecord, key: string, opts: seq[?T]) = + var it {.inject, used.}: T + let v = opts.map(opt => opt.formatJsonOption) + setProperty(r, key, json.`%`(v)) + + proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) = + var it {.inject, used.}: T + let v = val.map(it => body) + setProperty(r, key, json.`%`(v)) + + proc setProperty*(r: var JsonRecord, key: string, val: T) {.upraises:[ValueError, IOError].} = + var it {.inject, used.}: T = val + let v = body + setProperty(r, key, json.`%`(v)) + + elif format == LogFormat.textLines: + proc formatTextLineOption*(val: ?T): string = + var v = "none(" & $T & ")" + if it =? val: + v = "some(" & $(body) & ")" # that I used to know :) + v + + proc formatTextLineResult*(val: ?!T): string = + without it =? 
val, error: + return "Error: " & error.msg + $(body) + + proc setProperty*(r: var TextLineRecord, key: string, res: ?!T) = + var it {.inject, used.}: T + setProperty(r, key, res.formatTextLineResult) + + proc setProperty*(r: var TextLineRecord, key: string, opt: ?T) = + var it {.inject, used.}: T + let v = opt.formatTextLineOption + setProperty(r, key, v) + + proc setProperty*(r: var TextLineRecord, key: string, opts: seq[?T]) = + var it {.inject, used.}: T + let v = opts.map(opt => opt.formatTextLineOption) + setProperty(r, key, v.formatTextLineSeq) + + proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) = + var it {.inject, used.}: T + let v = val.map(it => body) + setProperty(r, key, v.formatTextLineSeq) + + proc setProperty*(r: var TextLineRecord, key: string, val: T) {.upraises:[ValueError, IOError].} = + var it {.inject, used.}: T = val + let v = body + setProperty(r, key, v) + +template formatIt*(T: type, body: untyped) {.dirty.} = + formatIt(LogFormat.textLines, T): body + formatIt(LogFormat.json, T): body + +formatIt(LogFormat.textLines, Cid): shortLog($it) +formatIt(LogFormat.json, Cid): $it +formatIt(UInt256): $it +formatIt(MultiAddress): $it +formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog +formatIt(LogFormat.json, array[32, byte]): it.to0xHex diff --git a/codex/manifest.nim b/codex/manifest.nim index cf33453a..3cd9219e 100644 --- a/codex/manifest.nim +++ b/codex/manifest.nim @@ -1,5 +1,4 @@ import ./manifest/coders import ./manifest/manifest -import ./manifest/types -export types, manifest, coders +export manifest, coders diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index f8e0a74b..e36039c7 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -14,19 +14,20 @@ import pkg/upraises push: {.upraises: [].} import std/tables +import std/sequtils import pkg/libp2p import pkg/questionable import pkg/questionable/results -import pkg/chronicles import pkg/chronos import ./manifest import 
../errors import ../blocktype -import ./types +import ../logutils +import ../indexingstrategy -func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] = +proc encode*(manifest: Manifest): ?!seq[byte] = ## Encode the manifest into a ``ManifestCodec`` ## multicodec container (Dag-pb) for now ## @@ -34,54 +35,67 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] = ? manifest.verify() var pbNode = initProtoBuffer() - for c in manifest.blocks: - var pbLink = initProtoBuffer() - pbLink.write(1, c.data.buffer) # write Cid links - pbLink.finish() - pbNode.write(2, pbLink) - # NOTE: The `Data` field in the the `dag-pb` # contains the following protobuf `Message` # # ```protobuf - # Message ErasureInfo { - # optional uint32 K = 1; # number of encoded blocks - # optional uint32 M = 2; # number of parity blocks - # optional bytes cid = 3; # cid of the original dataset - # optional uint32 original = 4; # number of original blocks + # Message VerificationInfo { + # bytes verifyRoot = 1; # Decimal encoded field-element + # repeated bytes slotRoots = 2; # Decimal encoded field-elements # } + # Message ErasureInfo { + # optional uint32 ecK = 1; # number of encoded blocks + # optional uint32 ecM = 2; # number of parity blocks + # optional bytes originalTreeCid = 3; # cid of the original dataset + # optional uint32 originalDatasetSize = 4; # size of the original dataset + # optional VerificationInformation verification = 5; # verification information + # } + # # Message Header { - # optional bytes rootHash = 1; # the root (tree) hash + # optional bytes treeCid = 1; # cid (root) of the tree # optional uint32 blockSize = 2; # size of a single block - # optional uint32 blocksLen = 3; # total amount of blocks - # optional ErasureInfo erasure = 4; # erasure coding info - # optional uint64 originalBytes = 5;# exact file size + # optional uint64 datasetSize = 3; # size of the dataset + # optional codec: MultiCodec = 4; # Dataset codec + # optional hcodec: MultiCodec = 5 # 
Multihash codec + # optional version: CidVersion = 6; # Cid version + # optional ErasureInfo erasure = 7; # erasure coding info # } # ``` # - - let cid = !manifest.rootHash + # var treeRootVBuf = initVBuffer() var header = initProtoBuffer() - header.write(1, cid.data.buffer) + header.write(1, manifest.treeCid.data.buffer) header.write(2, manifest.blockSize.uint32) - header.write(3, manifest.len.uint32) - header.write(5, manifest.originalBytes.uint64) + header.write(3, manifest.datasetSize.uint64) + header.write(4, manifest.codec.uint32) + header.write(5, manifest.hcodec.uint32) + header.write(6, manifest.version.uint32) if manifest.protected: var erasureInfo = initProtoBuffer() - erasureInfo.write(1, manifest.K.uint32) - erasureInfo.write(2, manifest.M.uint32) - erasureInfo.write(3, manifest.originalCid.data.buffer) - erasureInfo.write(4, manifest.originalLen.uint32) + erasureInfo.write(1, manifest.ecK.uint32) + erasureInfo.write(2, manifest.ecM.uint32) + erasureInfo.write(3, manifest.originalTreeCid.data.buffer) + erasureInfo.write(4, manifest.originalDatasetSize.uint64) + erasureInfo.write(5, manifest.protectedStrategy.uint32) + + if manifest.verifiable: + var verificationInfo = initProtoBuffer() + verificationInfo.write(1, manifest.verifyRoot.data.buffer) + for slotRoot in manifest.slotRoots: + verificationInfo.write(2, slotRoot.data.buffer) + verificationInfo.write(3, manifest.cellSize.uint32) + verificationInfo.write(4, manifest.verifiableStrategy.uint32) + erasureInfo.write(6, verificationInfo) + erasureInfo.finish() + header.write(7, erasureInfo) - header.write(4, erasureInfo) - - pbNode.write(1, header) # set the rootHash Cid as the data field + pbNode.write(1, header) # set the treeCid as the data field pbNode.finish() return pbNode.buffer.success -func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest = +proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = ## Decode a manifest from a data blob ## @@ -89,105 +103,131 @@ func 
decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest = pbNode = initProtoBuffer(data) pbHeader: ProtoBuffer pbErasureInfo: ProtoBuffer - rootHash: seq[byte] - originalCid: seq[byte] - originalBytes: uint64 + pbVerificationInfo: ProtoBuffer + treeCidBuf: seq[byte] + originalTreeCid: seq[byte] + datasetSize: uint64 + codec: uint32 + hcodec: uint32 + version: uint32 blockSize: uint32 - blocksLen: uint32 - originalLen: uint32 - K, M: uint32 - blocks: seq[Cid] + originalDatasetSize: uint64 + ecK, ecM: uint32 + protectedStrategy: uint32 + verifyRoot: seq[byte] + slotRoots: seq[seq[byte]] + cellSize: uint32 + verifiableStrategy: uint32 # Decode `Header` message if pbNode.getField(1, pbHeader).isErr: return failure("Unable to decode `Header` from dag-pb manifest!") # Decode `Header` contents - if pbHeader.getField(1, rootHash).isErr: - return failure("Unable to decode `rootHash` from manifest!") + if pbHeader.getField(1, treeCidBuf).isErr: + return failure("Unable to decode `treeCid` from manifest!") if pbHeader.getField(2, blockSize).isErr: return failure("Unable to decode `blockSize` from manifest!") - if pbHeader.getField(3, blocksLen).isErr: - return failure("Unable to decode `blocksLen` from manifest!") + if pbHeader.getField(3, datasetSize).isErr: + return failure("Unable to decode `datasetSize` from manifest!") - if pbHeader.getField(5, originalBytes).isErr: - return failure("Unable to decode `originalBytes` from manifest!") + if pbHeader.getField(4, codec).isErr: + return failure("Unable to decode `codec` from manifest!") - if pbHeader.getField(4, pbErasureInfo).isErr: + if pbHeader.getField(5, hcodec).isErr: + return failure("Unable to decode `hcodec` from manifest!") + + if pbHeader.getField(6, version).isErr: + return failure("Unable to decode `version` from manifest!") + + if pbHeader.getField(7, pbErasureInfo).isErr: return failure("Unable to decode `erasureInfo` from manifest!") - if pbErasureInfo.buffer.len > 0: - if pbErasureInfo.getField(1, K).isErr: 
+ let protected = pbErasureInfo.buffer.len > 0 + var verifiable = false + if protected: + if pbErasureInfo.getField(1, ecK).isErr: return failure("Unable to decode `K` from manifest!") - if pbErasureInfo.getField(2, M).isErr: + if pbErasureInfo.getField(2, ecM).isErr: return failure("Unable to decode `M` from manifest!") - if pbErasureInfo.getField(3, originalCid).isErr: - return failure("Unable to decode `originalCid` from manifest!") + if pbErasureInfo.getField(3, originalTreeCid).isErr: + return failure("Unable to decode `originalTreeCid` from manifest!") - if pbErasureInfo.getField(4, originalLen).isErr: - return failure("Unable to decode `originalLen` from manifest!") + if pbErasureInfo.getField(4, originalDatasetSize).isErr: + return failure("Unable to decode `originalDatasetSize` from manifest!") - let rootHashCid = ? Cid.init(rootHash).mapFailure - var linksBuf: seq[seq[byte]] - if pbNode.getRepeatedField(2, linksBuf).isOk: - for pbLinkBuf in linksBuf: - var - blocksBuf: seq[seq[byte]] - blockBuf: seq[byte] - pbLink = initProtoBuffer(pbLinkBuf) + if pbErasureInfo.getField(5, protectedStrategy).isErr: + return failure("Unable to decode `protectedStrategy` from manifest!") - if pbLink.getField(1, blockBuf).isOk: - blocks.add(? Cid.init(blockBuf).mapFailure) + if pbErasureInfo.getField(6, pbVerificationInfo).isErr: + return failure("Unable to decode `verificationInfo` from manifest!") - if blocksLen.int != blocks.len: - return failure("Total blocks and length of blocks in header don't match!") + verifiable = pbVerificationInfo.buffer.len > 0 + if verifiable: + if pbVerificationInfo.getField(1, verifyRoot).isErr: + return failure("Unable to decode `verifyRoot` from manifest!") - var - self = Manifest( - rootHash: rootHashCid.some, - originalBytes: originalBytes.int, - blockSize: blockSize.int, - blocks: blocks, - hcodec: (? 
rootHashCid.mhash.mapFailure).mcodec, - codec: rootHashCid.mcodec, - version: rootHashCid.cidver, - protected: pbErasureInfo.buffer.len > 0) + if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr: + return failure("Unable to decode `slotRoots` from manifest!") - if self.protected: - self.K = K.int - self.M = M.int - self.originalCid = ? Cid.init(originalCid).mapFailure - self.originalLen = originalLen.int + if pbVerificationInfo.getField(3, cellSize).isErr: + return failure("Unable to decode `cellSize` from manifest!") + + if pbVerificationInfo.getField(4, verifiableStrategy).isErr: + return failure("Unable to decode `verifiableStrategy` from manifest!") + + let + treeCid = ? Cid.init(treeCidBuf).mapFailure + + let + self = if protected: + Manifest.new( + treeCid = treeCid, + datasetSize = datasetSize.NBytes, + blockSize = blockSize.NBytes, + version = CidVersion(version), + hcodec = hcodec.MultiCodec, + codec = codec.MultiCodec, + ecK = ecK.int, + ecM = ecM.int, + originalTreeCid = ? Cid.init(originalTreeCid).mapFailure, + originalDatasetSize = originalDatasetSize.NBytes, + strategy = StrategyType(protectedStrategy)) + else: + Manifest.new( + treeCid = treeCid, + datasetSize = datasetSize.NBytes, + blockSize = blockSize.NBytes, + version = CidVersion(version), + hcodec = hcodec.MultiCodec, + codec = codec.MultiCodec) ? self.verify() + + if verifiable: + let + verifyRootCid = ? Cid.init(verifyRoot).mapFailure + slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure) + + return Manifest.new( + manifest = self, + verifyRoot = verifyRootCid, + slotRoots = slotRootCids, + cellSize = cellSize.NBytes, + strategy = StrategyType(verifiableStrategy) + ) + self.success -proc encode*( - self: Manifest, - encoder = ManifestContainers[$DagPBCodec]): ?!seq[byte] = - ## Encode a manifest using `encoder` - ## - - if self.rootHash.isNone: - ? 
self.makeRoot() - - encoder.encode(self) - -func decode*( - _: type Manifest, - data: openArray[byte], - decoder = ManifestContainers[$DagPBCodec]): ?!Manifest = +func decode*(_: type Manifest, blk: Block): ?!Manifest = ## Decode a manifest using `decoder` ## - decoder.decode(data) + if not ? blk.cid.isManifest: + return failure "Cid not a manifest codec" -func decode*(_: type Manifest, blk: Block): ?!Manifest = - without contentType =? blk.cid.contentType() and - containerType =? ManifestContainers.?[$contentType]: - return failure "CID has invalid content type for manifest" - Manifest.decode(blk.data, containerType) + Manifest.decode(blk.data) diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index 8be8d4bb..93aa5ba5 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -14,209 +14,327 @@ import pkg/upraises push: {.upraises: [].} import pkg/libp2p/protobuf/minprotobuf -import pkg/libp2p -import pkg/questionable +import pkg/libp2p/[cid, multihash, multicodec] import pkg/questionable/results -import pkg/chronicles import ../errors import ../utils +import ../utils/json +import ../units import ../blocktype -import ./types -import ./coders +import ../indexingstrategy +import ../logutils + + +# TODO: Manifest should be reworked to more concrete types, +# perhaps using inheritance +type + Manifest* = ref object of RootObj + treeCid {.serialize.}: Cid # Root of the merkle tree + datasetSize {.serialize.}: NBytes # Total size of all blocks + blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed) + codec: MultiCodec # Dataset codec + hcodec: MultiCodec # Multihash codec + version: CidVersion # Cid version + case protected {.serialize.}: bool # Protected datasets have erasure coded info + of true: + ecK: int # Number of blocks to encode + ecM: int # Number of resulting parity blocks + originalTreeCid: Cid # The original root of the dataset being erasure coded + 
originalDatasetSize: NBytes + protectedStrategy: StrategyType # Indexing strategy used to build the slot roots + case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs + of true: + verifyRoot: Cid # Root of the top level merkle tree built from slot roots + slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks + cellSize: NBytes # Size of each slot cell + verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots + else: + discard + else: + discard + +############################################################ +# Accessors +############################################################ + +func blockSize*(self: Manifest): NBytes = + self.blockSize + +func datasetSize*(self: Manifest): NBytes = + self.datasetSize + +func version*(self: Manifest): CidVersion = + self.version + +func hcodec*(self: Manifest): MultiCodec = + self.hcodec + +func codec*(self: Manifest): MultiCodec = + self.codec + +func protected*(self: Manifest): bool = + self.protected + +func ecK*(self: Manifest): int = + self.ecK + +func ecM*(self: Manifest): int = + self.ecM + +func originalTreeCid*(self: Manifest): Cid = + self.originalTreeCid + +func originalBlocksCount*(self: Manifest): int = + divUp(self.originalDatasetSize.int, self.blockSize.int) + +func originalDatasetSize*(self: Manifest): NBytes = + self.originalDatasetSize + +func treeCid*(self: Manifest): Cid = + self.treeCid + +func blocksCount*(self: Manifest): int = + divUp(self.datasetSize.int, self.blockSize.int) + +func verifiable*(self: Manifest): bool = + bool (self.protected and self.verifiable) + +func verifyRoot*(self: Manifest): Cid = + self.verifyRoot + +func slotRoots*(self: Manifest): seq[Cid] = + self.slotRoots + +func numSlots*(self: Manifest): int = + self.ecK + self.ecM + +func cellSize*(self: Manifest): NBytes = + self.cellSize + +func protectedStrategy*(self: Manifest): StrategyType = + self.protectedStrategy + +func 
verifiableStrategy*(self: Manifest): StrategyType = + self.verifiableStrategy + +func numSlotBlocks*(self: Manifest): int = + divUp(self.blocksCount, self.numSlots) ############################################################ # Operations on block list ############################################################ -func len*(self: Manifest): int = - self.blocks.len - -func `[]`*(self: Manifest, i: Natural): Cid = - self.blocks[i] - -func `[]=`*(self: var Manifest, i: Natural, item: Cid) = - self.rootHash = Cid.none - self.blocks[i] = item - -func `[]`*(self: Manifest, i: BackwardsIndex): Cid = - self.blocks[self.len - i.int] - -func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) = - self.rootHash = Cid.none - self.blocks[self.len - i.int] = item - -proc add*(self: Manifest, cid: Cid) = - assert not self.protected # we expect that protected manifests are created with properly-sized self.blocks - self.rootHash = Cid.none - trace "Adding cid to manifest", cid - self.blocks.add(cid) - self.originalBytes = self.blocks.len * self.blockSize - -iterator items*(self: Manifest): Cid = - for b in self.blocks: - yield b - -iterator pairs*(self: Manifest): tuple[key: int, val: Cid] = - for pair in self.blocks.pairs(): - yield pair - -func contains*(self: Manifest, cid: Cid): bool = - cid in self.blocks +func isManifest*(cid: Cid): ?!bool = + success (ManifestCodec == ? 
cid.contentType().mapFailure(CodexError)) +func isManifest*(mc: MultiCodec): ?!bool = + success mc == ManifestCodec ############################################################ # Various sizes and verification ############################################################ -func bytes*(self: Manifest, pad = true): int = - ## Compute how many bytes corresponding StoreStream(Manifest, pad) will return - if pad or self.protected: - self.len * self.blockSize - else: - self.originalBytes - func rounded*(self: Manifest): int = ## Number of data blocks in *protected* manifest including padding at the end - roundUp(self.originalLen, self.K) + roundUp(self.originalBlocksCount, self.ecK) func steps*(self: Manifest): int = ## Number of EC groups in *protected* manifest - divUp(self.originalLen, self.K) + divUp(self.rounded, self.ecK) func verify*(self: Manifest): ?!void = ## Check manifest correctness ## - let originalLen = (if self.protected: self.originalLen else: self.len) - if divUp(self.originalBytes, self.blockSize) != originalLen: - return failure newException(CodexError, "Broken manifest: wrong originalBytes") - - if self.protected and (self.len != self.steps * (self.K + self.M)): - return failure newException(CodexError, "Broken manifest: wrong originalLen") + if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)): + return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount") return success() +func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} = + self.treeCid.success -############################################################ -# Cid computation -############################################################ - -template hashBytes(mh: MultiHash): seq[byte] = - ## get the hash bytes of a multihash object - ## - - mh.data.buffer[mh.dpos..(mh.dpos + mh.size - 1)] - -proc makeRoot*(self: Manifest): ?!void = - ## Create a tree hash root of the contained - ## block hashes - ## - - var - stack: seq[MultiHash] 
- - for cid in self: - stack.add(? cid.mhash.mapFailure) - - while stack.len > 1: - let - (b1, b2) = (stack.pop(), stack.pop()) - mh = ? MultiHash.digest( - $self.hcodec, - (b1.hashBytes() & b2.hashBytes())) - .mapFailure - stack.add(mh) - - if stack.len == 1: - let cid = ? Cid.init( - self.version, - self.codec, - (? EmptyDigests[self.version][self.hcodec].catch)) - .mapFailure - - self.rootHash = cid.some - - success() - -proc cid*(self: Manifest): ?!Cid = - ## Generate a root hash using the treehash algorithm - ## - - if self.rootHash.isNone: - ? self.makeRoot() - - (!self.rootHash).success +func `==`*(a, b: Manifest): bool = + (a.treeCid == b.treeCid) and + (a.datasetSize == b.datasetSize) and + (a.blockSize == b.blockSize) and + (a.version == b.version) and + (a.hcodec == b.hcodec) and + (a.codec == b.codec) and + (a.protected == b.protected) and + (if a.protected: + (a.ecK == b.ecK) and + (a.ecM == b.ecM) and + (a.originalTreeCid == b.originalTreeCid) and + (a.originalDatasetSize == b.originalDatasetSize) and + (a.protectedStrategy == b.protectedStrategy) and + (a.verifiable == b.verifiable) and + (if a.verifiable: + (a.verifyRoot == b.verifyRoot) and + (a.slotRoots == b.slotRoots) and + (a.cellSize == b.cellSize) and + (a.verifiableStrategy == b.verifiableStrategy) + else: + true) + else: + true) +func `$`*(self: Manifest): string = + "treeCid: " & $self.treeCid & + ", datasetSize: " & $self.datasetSize & + ", blockSize: " & $self.blockSize & + ", version: " & $self.version & + ", hcodec: " & $self.hcodec & + ", codec: " & $self.codec & + ", protected: " & $self.protected & + (if self.protected: + ", ecK: " & $self.ecK & + ", ecM: " & $self.ecM & + ", originalTreeCid: " & $self.originalTreeCid & + ", originalDatasetSize: " & $self.originalDatasetSize & + ", verifiable: " & $self.verifiable & + (if self.verifiable: + ", verifyRoot: " & $self.verifyRoot & + ", slotRoots: " & $self.slotRoots + else: + "") + else: + "") 
############################################################ # Constructors ############################################################ -proc new*( +func new*( T: type Manifest, - blocks: openArray[Cid] = [], - protected = false, - version = CIDv1, - hcodec = multiCodec("sha2-256"), - codec = multiCodec("raw"), - blockSize = BlockSize): ?!T = - ## Create a manifest using array of `Cid`s - ## - - if hcodec notin EmptyDigests[version]: - return failure("Unsupported manifest hash codec!") + treeCid: Cid, + blockSize: NBytes, + datasetSize: NBytes, + version: CidVersion = CIDv1, + hcodec = Sha256HashCodec, + codec = BlockCodec, + protected = false): Manifest = T( - blocks: @blocks, + treeCid: treeCid, + blockSize: blockSize, + datasetSize: datasetSize, version: version, codec: codec, hcodec: hcodec, - blockSize: blockSize, - originalBytes: blocks.len * blockSize, - protected: protected).success + protected: protected) -proc new*( +func new*( T: type Manifest, manifest: Manifest, - K, M: int): ?!Manifest = + treeCid: Cid, + datasetSize: NBytes, + ecK, ecM: int, + strategy = SteppedStrategy): Manifest = ## Create an erasure protected dataset from an - ## un-protected one + ## unprotected one ## - var - self = Manifest( - version: manifest.version, - codec: manifest.codec, - hcodec: manifest.hcodec, - originalBytes: manifest.originalBytes, - blockSize: manifest.blockSize, - protected: true, - K: K, M: M, - originalCid: ? 
manifest.cid, - originalLen: manifest.len) + Manifest( + treeCid: treeCid, + datasetSize: datasetSize, + version: manifest.version, + codec: manifest.codec, + hcodec: manifest.hcodec, + blockSize: manifest.blockSize, + protected: true, + ecK: ecK, ecM: ecM, + originalTreeCid: manifest.treeCid, + originalDatasetSize: manifest.datasetSize, + protectedStrategy: strategy) - let - encodedLen = self.rounded + (self.steps * M) - - self.blocks = newSeq[Cid](encodedLen) - - # copy original manifest blocks - for i in 0..= self.leavesCount: + return failure "Invalid leaf index " & $i + + let + leaf = self.leaves[i] + mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure + + Cid.init(version, dataCodec, mhash).mapFailure + +proc `$`*(self: CodexTree): string = + let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none" + "CodexTree(" & + " root: " & root & + ", leavesCount: " & $self.leavesCount & + ", levels: " & $self.levels & + ", mcodec: " & $self.mcodec & " )" + +proc `$`*(self: CodexProof): string = + "CodexProof(" & + " nleaves: " & $self.nleaves & + ", index: " & $self.index & + ", path: " & $self.path.mapIt( byteutils.toHex(it) ) & + ", mcodec: " & $self.mcodec & " )" + +func compress*( + x, y: openArray[byte], + key: ByteTreeKey, + mhash: MHash): ?!ByteHash = + ## Compress two hashes + ## + + var digest = newSeq[byte](mhash.size) + mhash.coder(@x & @y & @[ key.byte ], digest) + success digest + +func init*( + _: type CodexTree, + mcodec: MultiCodec = Sha256HashCodec, + leaves: openArray[ByteHash]): ?!CodexTree = + + if leaves.len == 0: + return failure "Empty leaves" + + let + mhash = ? mcodec.mhash() + compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = + compress(x, y, key, mhash) + Zero: ByteHash = newSeq[byte](mhash.size) + + if mhash.size != leaves[0].len: + return failure "Invalid hash length" + + var + self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero) + + self.layers = ? 
merkleTreeWorker(self, leaves, isBottomLayer = true) + success self + +func init*( + _: type CodexTree, + leaves: openArray[MultiHash]): ?!CodexTree = + + if leaves.len == 0: + return failure "Empty leaves" + + let + mcodec = leaves[0].mcodec + leaves = leaves.mapIt( it.digestBytes ) + + CodexTree.init(mcodec, leaves) + +func init*( + _: type CodexTree, + leaves: openArray[Cid]): ?!CodexTree = + if leaves.len == 0: + return failure "Empty leaves" + + let + mcodec = (? leaves[0].mhash.mapFailure).mcodec + leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes ) + + CodexTree.init(mcodec, leaves) + +proc fromNodes*( + _: type CodexTree, + mcodec: MultiCodec = Sha256HashCodec, + nodes: openArray[ByteHash], + nleaves: int): ?!CodexTree = + + if nodes.len == 0: + return failure "Empty nodes" + + let + mhash = ? mcodec.mhash() + Zero = newSeq[byte](mhash.size) + compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = + compress(x, y, key, mhash) + + if mhash.size != nodes[0].len: + return failure "Invalid hash length" + + var + self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec) + layer = nleaves + pos = 0 + + while pos < nodes.len: + self.layers.add( nodes[pos..<(pos + layer)] ) + pos += layer + layer = divUp(layer, 2) + + let + index = Rng.instance.rand(nleaves - 1) + proof = ? self.getProof(index) + + if not ? proof.verify(self.leaves[index], ? self.root): # sanity check + return failure "Unable to verify tree built from nodes" + + success self + +func init*( + _: type CodexProof, + mcodec: MultiCodec = Sha256HashCodec, + index: int, + nleaves: int, + nodes: openArray[ByteHash]): ?!CodexProof = + + if nodes.len == 0: + return failure "Empty nodes" + + let + mhash = ? 
mcodec.mhash() + Zero = newSeq[byte](mhash.size) + compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} = + compress(x, y, key, mhash) + + success CodexProof( + compress: compressor, + zero: Zero, + mcodec: mcodec, + index: index, + nleaves: nleaves, + path: @nodes) diff --git a/codex/merkletree/merkletree.nim b/codex/merkletree/merkletree.nim new file mode 100644 index 00000000..2f46b93d --- /dev/null +++ b/codex/merkletree/merkletree.nim @@ -0,0 +1,153 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +{.push raises: [].} + +import std/bitops + +import pkg/questionable/results + +import ../errors + +type + CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].} + + MerkleTree*[H, K] = ref object of RootObj + layers* : seq[seq[H]] + compress*: CompressFn[H, K] + zero* : H + + MerkleProof*[H, K] = ref object of RootObj + index* : int # linear index of the leaf, starting from 0 + path* : seq[H] # order: from the bottom to the top + nleaves* : int # number of leaves in the tree (=size of input) + compress*: CompressFn[H, K] # compress function + zero* : H # zero value + +func depth*[H, K](self: MerkleTree[H, K]): int = + return self.layers.len - 1 + +func leavesCount*[H, K](self: MerkleTree[H, K]): int = + return self.layers[0].len + +func levels*[H, K](self: MerkleTree[H, K]): int = + return self.layers.len + +func leaves*[H, K](self: MerkleTree[H, K]): seq[H] = + return self.layers[0] + +iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] = + for layer in self.layers: + yield layer + +iterator nodes*[H, K](self: MerkleTree[H, K]): H = + for layer in self.layers: + for node in layer: + yield node + +func root*[H, K](self: 
MerkleTree[H, K]): ?!H = + let last = self.layers[^1] + if last.len != 1: + return failure "invalid tree" + + return success last[0] + +func getProof*[H, K]( + self: MerkleTree[H, K], + index: int, + proof: MerkleProof[H, K]): ?!void = + let depth = self.depth + let nleaves = self.leavesCount + + if not (index >= 0 and index < nleaves): + return failure "index out of bounds" + + var path : seq[H] = newSeq[H](depth) + var k = index + var m = nleaves + for i in 0.. odd node + h = ? proof.compress( h, p, K(bottomFlag.ord + 2) ) + else: + # even node + h = ? proof.compress( h , p, bottomFlag ) + bottomFlag = K.KeyNone + j = j shr 1 + m = (m+1) shr 1 + + return success h + +func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool = + success bool(root == ? proof.reconstructRoot(leaf)) + +func merkleTreeWorker*[H, K]( + self: MerkleTree[H, K], + xs: openArray[H], + isBottomLayer: static bool): ?!seq[seq[H]] = + + let a = low(xs) + let b = high(xs) + let m = b - a + 1 + + when not isBottomLayer: + if m == 1: + return success @[ @xs ] + + let halfn: int = m div 2 + let n : int = 2 * halfn + let isOdd: bool = (n != m) + + var ys: seq[H] + if not isOdd: + ys = newSeq[H](halfn) + else: + ys = newSeq[H](halfn + 1) + + for i in 0.. self.networkStore.getBlock(BlockAddress.init(cid, i)) + # ) + + while not iter.finished: + let blocks = collect: + for i in 0.. 0): - trace "Got data from stream", len = chunk.len - without blk =? bt.Block.new(chunk): + without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err: + return failure(err) + + without cid =? Cid.init(CIDv1, dataCodec, mhash).mapFailure, err: + return failure(err) + + without blk =? bt.Block.new(cid, chunk, verify = false): return failure("Unable to init block from chunk!") - blockManifest.add(blk.cid) - if isErr (await self.blockStore.putBlock(blk)): - # trace "Unable to store block", cid = blk.cid - return failure(&"Unable to store block {blk.cid}") + cids.add(cid) + if err =? 
(await self.networkStore.putBlock(blk)).errorOption: + error "Unable to store block", cid = blk.cid, err = err.msg + return failure(&"Unable to store block {blk.cid}") except CancelledError as exc: raise exc except CatchableError as exc: @@ -200,193 +336,454 @@ proc store*( finally: await stream.close() - # Generate manifest - blockManifest.originalBytes = chunker.offset # store the exact file size - without data =? blockManifest.encode(): - return failure( - newException(CodexError, "Could not generate dataset manifest!")) + without tree =? CodexTree.init(cids), err: + return failure(err) - # Store as a dag-pb block - without manifest =? bt.Block.new(data = data, codec = DagPBCodec): - trace "Unable to init block from manifest data!" - return failure("Unable to init block from manifest data!") + without treeCid =? tree.rootCid(CIDv1, dataCodec), err: + return failure(err) - if isErr (await self.blockStore.putBlock(manifest)): - trace "Unable to store manifest", cid = manifest.cid - return failure("Unable to store manifest " & $manifest.cid) + for index, cid in cids: + without proof =? tree.getProof(index), err: + return failure(err) + if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption: + # TODO add log here + return failure(err) - without cid =? blockManifest.cid, error: - trace "Unable to generate manifest Cid!", exc = error.msg - return failure(error.msg) + let manifest = Manifest.new( + treeCid = treeCid, + blockSize = blockSize, + datasetSize = NBytes(chunker.offset), + version = CIDv1, + hcodec = hcodec, + codec = dataCodec) - trace "Stored data", manifestCid = manifest.cid, - contentCid = cid, - blocks = blockManifest.len + without manifestBlk =? 
await self.storeManifest(manifest), err: + error "Unable to store manifest" + return failure(err) - # Announce manifest - await self.discovery.provide(manifest.cid) + info "Stored data", manifestCid = manifestBlk.cid, + treeCid = treeCid, + blocks = manifest.blocksCount, + datasetSize = manifest.datasetSize - return manifest.cid.success + await self.discovery.provide(manifestBlk.cid) + await self.discovery.provide(treeCid) -proc requestStorage*(self: CodexNodeRef, - cid: Cid, - duration: UInt256, - nodes: uint, - tolerance: uint, - reward: UInt256, - expiry = UInt256.none): Future[?!PurchaseId] {.async.} = + return manifestBlk.cid.success + +proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = + without cids =? await self.networkStore.listBlocks(BlockType.Manifest): + warn "Failed to listBlocks" + return + + for c in cids: + if cid =? await c: + without blk =? await self.networkStore.getBlock(cid): + warn "Failed to get manifest block by cid", cid + return + + without manifest =? Manifest.decode(blk): + warn "Failed to decode manifest", cid + return + + onManifest(cid, manifest) + +proc setupRequest( + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + reward: UInt256, + collateral: UInt256, + expiry: UInt256): Future[?!StorageRequest] {.async.} = + ## Setup slots for a given dataset + ## + + let + ecK = nodes - tolerance + ecM = tolerance + + logScope: + cid = cid + duration = duration + nodes = nodes + tolerance = tolerance + reward = reward + proofProbability = proofProbability + collateral = collateral + expiry = expiry + ecK = ecK + ecM = ecM + + trace "Setting up slots" + + without manifest =? 
await self.fetchManifest(cid), error: + trace "Unable to fetch manifest for cid" + return failure error + + # ---------------------------------------------------------------------------- + # FIXME this is a BAND-AID to address + # https://github.com/codex-storage/nim-codex/issues/852 temporarily for the + # workshop. Remove this once we get that fixed. + if manifest.blocksCount.uint == ecK: + return failure("Cannot setup slots for a dataset with ecK == numBlocks. Please use a larger file or a different combination of `nodes` and `tolerance`.") + # ---------------------------------------------------------------------------- + + + # Erasure code the dataset according to provided parameters + let + erasure = Erasure.new( + self.networkStore.localStore, + leoEncoderProvider, + leoDecoderProvider, + self.taskpool) + + without encoded =? (await erasure.encode(manifest, ecK, ecM)), error: + trace "Unable to erasure code dataset" + return failure(error) + + without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err: + trace "Unable to create slot builder" + return failure(err) + + without verifiable =? (await builder.buildManifest()), err: + trace "Unable to build verifiable manifest" + return failure(err) + + without manifestBlk =? await self.storeManifest(verifiable), err: + trace "Unable to store verifiable manifest" + return failure(err) + + let + verifyRoot = + if builder.verifyRoot.isNone: + return failure("No slots root") + else: + builder.verifyRoot.get.toBytes + + request = StorageRequest( + ask: StorageAsk( + slots: verifiable.numSlots.uint64, + slotSize: builder.slotBytes.uint.u256, + duration: duration, + proofProbability: proofProbability, + reward: reward, + collateral: collateral, + maxSlotLoss: tolerance + ), + content: StorageContent( + cid: $manifestBlk.cid, # TODO: why string? 
+ merkleRoot: verifyRoot + ), + expiry: expiry + ) + + trace "Request created", request = $request + success request + +proc requestStorage*( + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + reward: UInt256, + collateral: UInt256, + expiry: UInt256): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. ## - ## Roughly the flow is as follows: - ## - Get the original cid from the store (should have already been uploaded) - ## - Erasure code it according to the nodes and tolerance parameters - ## - Run the PoR setup on the erasure dataset - ## - Call into the marketplace and purchasing contracts - ## - trace "Received a request for storage!", cid, duration, nodes, tolerance, reward - without contracts =? self.contracts: + logScope: + cid = cid + duration = duration + nodes = nodes + tolerance = tolerance + reward = reward + proofProbability = proofProbability + collateral = collateral + expiry = expiry.truncate(int64) + now = self.clock.now + + trace "Received a request for storage!" + + without contracts =? self.contracts.client: trace "Purchasing not available" return failure "Purchasing not available" - without manifest =? await self.fetchManifest(cid), error: - trace "Unable to fetch manifest for cid", cid - raise error - - # Erasure code the dataset according to provided parameters - without encoded =? (await self.erasure.encode(manifest, nodes.int, tolerance.int)), error: - trace "Unable to erasure code dataset", cid - return failure(error) - - without encodedData =? encoded.encode(), error: - trace "Unable to encode protected manifest" - return failure(error) - - without encodedBlk =? 
bt.Block.new(data = encodedData, codec = DagPBCodec), error: - trace "Unable to create block from encoded manifest" - return failure(error) - - if isErr (await self.blockStore.putBlock(encodedBlk)): - trace "Unable to store encoded manifest block", cid = encodedBlk.cid - return failure("Unable to store encoded manifest block") - - let request = StorageRequest( - ask: StorageAsk( - slots: nodes + tolerance, - slotSize: (encoded.blockSize * encoded.steps).u256, - duration: duration, - reward: reward, - maxSlotLoss: tolerance - ), - content: StorageContent( - cid: $encodedBlk.cid, - erasure: StorageErasure( - totalChunks: encoded.len.uint64, - ), - por: StoragePor( - u: @[], # TODO: PoR setup - publicKey: @[], # TODO: PoR setup - name: @[] # TODO: PoR setup - ) - ), - expiry: expiry |? 0.u256 - ) + without request =? + (await self.setupRequest( + cid, + duration, + proofProbability, + nodes, + tolerance, + reward, + collateral, + expiry)), err: + trace "Unable to setup request" + return failure err let purchase = await contracts.purchasing.purchase(request) - return success purchase.id + success purchase.id + +proc onStore( + self: CodexNodeRef, + request: StorageRequest, + slotIdx: UInt256, + blocksCb: BlocksCb): Future[?!void] {.async.} = + ## store data in local storage + ## + + logScope: + cid = request.content.cid + slotIdx = slotIdx + + trace "Received a request to store a slot" + + without cid =? Cid.init(request.content.cid).mapFailure, err: + trace "Unable to parse Cid", cid + return failure(err) + + without manifest =? (await self.fetchManifest(cid)), err: + trace "Unable to fetch manifest for cid", cid, err = err.msg + return failure(err) + + without builder =? 
Poseidon2Builder.new( + self.networkStore, manifest, manifest.verifiableStrategy + ), err: + trace "Unable to create slots builder", err = err.msg + return failure(err) + + let + slotIdx = slotIdx.truncate(int) + expiry = request.expiry.toSecondsSince1970 + + if slotIdx > manifest.slotRoots.high: + trace "Slot index not in manifest", slotIdx + return failure(newException(CodexError, "Slot index not in manifest")) + + proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} = + trace "Updating expiry for blocks", blocks = blocks.len + + let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) + if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: + return failure(updateExpiryErr) + + if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption: + trace "Unable to process blocks", err = err.msg + return failure(err) + + return success() + + without indexer =? manifest.verifiableStrategy.init( + 0, manifest.blocksCount - 1, manifest.numSlots).catch, err: + trace "Unable to create indexing strategy from protected manifest", err = err.msg + return failure(err) + + without blksIter =? indexer.getIndicies(slotIdx).catch, err: + trace "Unable to get indicies from strategy", err = err.msg + return failure(err) + + if err =? (await self.fetchBatched( + manifest.treeCid, + blksIter, + onBatch = updateExpiry)).errorOption: + trace "Unable to fetch blocks", err = err.msg + return failure(err) + + without slotRoot =? (await builder.buildSlot(slotIdx.Natural)), err: + trace "Unable to build slot", err = err.msg + return failure(err) + + trace "Slot successfully retrieved and reconstructed" + + if cid =? 
slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]: + trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() + return failure(newException(CodexError, "Slot root mismatch")) + + trace "Slot successfully retrieved and reconstructed" + + return success() + +proc onProve( + self: CodexNodeRef, + slot: Slot, + challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + ## Generats a proof for a given slot and challenge + ## + + let + cidStr = slot.request.content.cid + slotIdx = slot.slotIndex.truncate(Natural) + + logScope: + cid = cidStr + slot = slotIdx + challenge = challenge + + trace "Received proof challenge" + + if prover =? self.prover: + trace "Prover enabled" + + without cid =? Cid.init(cidStr).mapFailure, err: + error "Unable to parse Cid", cid, err = err.msg + return failure(err) + + without manifest =? await self.fetchManifest(cid), err: + error "Unable to fetch manifest for cid", err = err.msg + return failure(err) + + when defined(verify_circuit): + without (inputs, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + error "Unable to generate proof", err = err.msg + return failure(err) + + without checked =? await prover.verify(proof, inputs), err: + error "Unable to verify proof", err = err.msg + return failure(err) + + if not checked: + error "Proof verification failed" + return failure("Proof verification failed") + + trace "Proof verified successfully" + else: + without (_, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + error "Unable to generate proof", err = err.msg + return failure(err) + + let groth16Proof = proof.toGroth16Proof() + trace "Proof generated successfully", groth16Proof + + success groth16Proof + else: + warn "Prover not enabled" + failure "Prover not enabled" + +proc onExpiryUpdate( + self: CodexNodeRef, + rootCid: string, + expiry: SecondsSince1970): Future[?!void] {.async.} = + without cid =? 
Cid.init(rootCid): + trace "Unable to parse Cid", cid + let error = newException(CodexError, "Unable to parse Cid") + return failure(error) + + return await self.updateExpiry(cid, expiry) + +proc onClear( + self: CodexNodeRef, + request: StorageRequest, + slotIndex: UInt256) = +# TODO: remove data from local storage + discard + +proc start*(self: CodexNodeRef) {.async.} = + if not self.engine.isNil: + await self.engine.start() + + if not self.discovery.isNil: + await self.discovery.start() + + if not self.clock.isNil: + await self.clock.start() + + if hostContracts =? self.contracts.host: + hostContracts.sales.onStore = + proc( + request: StorageRequest, + slot: UInt256, + onBatch: BatchProc): Future[?!void] = self.onStore(request, slot, onBatch) + + hostContracts.sales.onExpiryUpdate = + proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] = + self.onExpiryUpdate(rootCid, expiry) + + hostContracts.sales.onClear = + proc(request: StorageRequest, slotIndex: UInt256) = + # TODO: remove data from local storage + self.onClear(request, slotIndex) + + hostContracts.sales.onProve = + proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] = + # TODO: generate proof + self.onProve(slot, challenge) + + try: + await hostContracts.start() + except CancelledError as error: + raise error + except CatchableError as error: + error "Unable to start host contract interactions", error=error.msg + self.contracts.host = HostInteractions.none + + if clientContracts =? self.contracts.client: + try: + await clientContracts.start() + except CancelledError as error: + raise error + except CatchableError as error: + error "Unable to start client contract interactions: ", error=error.msg + self.contracts.client = ClientInteractions.none + + if validatorContracts =? 
self.contracts.validator: + try: + await validatorContracts.start() + except CancelledError as error: + raise error + except CatchableError as error: + error "Unable to start validator contract interactions: ", error=error.msg + self.contracts.validator = ValidatorInteractions.none + + self.networkId = self.switch.peerInfo.peerId + notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs + +proc stop*(self: CodexNodeRef) {.async.} = + trace "Stopping node" + + if not self.engine.isNil: + await self.engine.stop() + + if not self.discovery.isNil: + await self.discovery.stop() + + if not self.clock.isNil: + await self.clock.stop() + + if clientContracts =? self.contracts.client: + await clientContracts.stop() + + if hostContracts =? self.contracts.host: + await hostContracts.stop() + + if validatorContracts =? self.contracts.validator: + await validatorContracts.stop() + + if not self.networkStore.isNil: + await self.networkStore.close proc new*( T: type CodexNodeRef, switch: Switch, - store: BlockStore, + networkStore: NetworkStore, engine: BlockExcEngine, - erasure: Erasure, discovery: Discovery, - contracts = ContractInteractions.none): T = - T( + prover = Prover.none, + contracts = Contracts.default, + taskpool = Taskpool.new(num_threads = countProcessors())): CodexNodeRef = + ## Create new instance of a Codex self, call `start` to run it + ## + + CodexNodeRef( switch: switch, - blockStore: store, + networkStore: networkStore, engine: engine, - erasure: erasure, + prover: prover, discovery: discovery, - contracts: contracts) - -proc start*(node: CodexNodeRef) {.async.} = - if not node.switch.isNil: - await node.switch.start() - - if not node.engine.isNil: - await node.engine.start() - - if not node.erasure.isNil: - await node.erasure.start() - - if not node.discovery.isNil: - await node.discovery.start() - - if contracts =? 
node.contracts: - # TODO: remove Sales callbacks, pass BlockStore and StorageProofs instead - contracts.sales.onStore = proc(request: StorageRequest, - slot: UInt256, - availability: Availability) {.async.} = - ## store data in local storage - ## - - without cid =? Cid.init(request.content.cid): - trace "Unable to parse Cid", cid - raise newException(CodexError, "Unable to parse Cid") - - without manifest =? await node.fetchManifest(cid), error: - trace "Unable to fetch manifest for cid", cid - raise error - - trace "Fetching block for manifest", cid - # TODO: This will probably require a call to `getBlock` either way, - # since fetching of blocks will have to be selective according - # to a combination of parameters, such as node slot position - # and dataset geometry - let fetchRes = await node.fetchBatched(manifest) - if fetchRes.isErr: - raise newException(CodexError, "Unable to retrieve blocks") - - contracts.sales.onClear = proc(availability: Availability, - request: StorageRequest, - slotIndex: UInt256) = - # TODO: remove data from local storage - discard - - contracts.sales.onProve = proc(request: StorageRequest, - slot: UInt256): Future[seq[byte]] {.async.} = - # TODO: generate proof - return @[42'u8] - - try: - await contracts.start() - except CatchableError as error: - error "Unable to start contract interactions: ", error=error.msg - node.contracts = ContractInteractions.none - - node.networkId = node.switch.peerInfo.peerId - notice "Started codex node", id = $node.networkId, addrs = node.switch.peerInfo.addrs - -proc stop*(node: CodexNodeRef) {.async.} = - trace "Stopping node" - - if not node.engine.isNil: - await node.engine.stop() - - if not node.switch.isNil: - await node.switch.stop() - - if not node.erasure.isNil: - await node.erasure.stop() - - if not node.discovery.isNil: - await node.discovery.stop() - - if contracts =? 
node.contracts: - await contracts.stop() - - if not node.blockStore.isNil: - await node.blockStore.close + contracts: contracts, + taskpool: taskpool) diff --git a/codex/storageproofs/timing/periods.nim b/codex/periods.nim similarity index 100% rename from codex/storageproofs/timing/periods.nim rename to codex/periods.nim diff --git a/codex/proving.nim b/codex/proving.nim deleted file mode 100644 index 05b88de2..00000000 --- a/codex/proving.nim +++ /dev/null @@ -1,79 +0,0 @@ -import std/sets -import pkg/upraises -import pkg/questionable -import pkg/chronicles -import ./storageproofs -import ./clock - -export sets -export storageproofs - -type - Proving* = ref object - proofs: Proofs - clock: Clock - loop: ?Future[void] - slots*: HashSet[SlotId] - onProofRequired: ?OnProofRequired - OnProofRequired* = proc (id: SlotId) {.gcsafe, upraises:[].} - -func new*(_: type Proving, proofs: Proofs, clock: Clock): Proving = - Proving(proofs: proofs, clock: clock) - -proc `onProofRequired=`*(proving: Proving, callback: OnProofRequired) = - proving.onProofRequired = some callback - -func add*(proving: Proving, id: SlotId) = - proving.slots.incl(id) - -proc getCurrentPeriod(proving: Proving): Future[Period] {.async.} = - let periodicity = await proving.proofs.periodicity() - return periodicity.periodOf(proving.clock.now().u256) - -proc waitUntilPeriod(proving: Proving, period: Period) {.async.} = - let periodicity = await proving.proofs.periodicity() - await proving.clock.waitUntil(periodicity.periodStart(period).truncate(int64)) - -proc removeEndedContracts(proving: Proving) {.async.} = - let now = proving.clock.now().u256 - var ended: HashSet[SlotId] - for id in proving.slots: - if now >= (await proving.proofs.getProofEnd(id)): - ended.incl(id) - proving.slots.excl(ended) - -proc run(proving: Proving) {.async.} = - try: - while true: - let currentPeriod = await proving.getCurrentPeriod() - await proving.removeEndedContracts() - for id in proving.slots: - if (await 
proving.proofs.isProofRequired(id)) or - (await proving.proofs.willProofBeRequired(id)): - if callback =? proving.onProofRequired: - callback(id) - await proving.waitUntilPeriod(currentPeriod + 1) - except CancelledError: - discard - except CatchableError as e: - error "Proving failed", msg = e.msg - -proc start*(proving: Proving) {.async.} = - if proving.loop.isSome: - return - - proving.loop = some proving.run() - -proc stop*(proving: Proving) {.async.} = - if loop =? proving.loop: - proving.loop = Future[void].none - if not loop.finished: - await loop.cancelAndWait() - -proc submitProof*(proving: Proving, id: SlotId, proof: seq[byte]) {.async.} = - await proving.proofs.submitProof(id, proof) - -proc subscribeProofSubmission*(proving: Proving, - callback: OnProofSubmitted): - Future[Subscription] = - proving.proofs.subscribeProofSubmission(callback) diff --git a/codex/purchasing.nim b/codex/purchasing.nim index 656f1d23..ca92ece9 100644 --- a/codex/purchasing.nim +++ b/codex/purchasing.nim @@ -18,18 +18,15 @@ type clock: Clock purchases: Table[PurchaseId, Purchase] proofProbability*: UInt256 - requestExpiryInterval*: UInt256 PurchaseTimeout* = Timeout const DefaultProofProbability = 100.u256 -const DefaultRequestExpiryInterval = (10 * 60).u256 proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing = Purchasing( market: market, clock: clock, proofProbability: DefaultProofProbability, - requestExpiryInterval: DefaultRequestExpiryInterval, ) proc load*(purchasing: Purchasing) {.async.} = @@ -47,12 +44,11 @@ proc stop*(purchasing: Purchasing) {.async.} = discard proc populate*(purchasing: Purchasing, - request: StorageRequest): Future[StorageRequest] {.async.} = + request: StorageRequest + ): Future[StorageRequest] {.async.} = result = request if result.ask.proofProbability == 0.u256: result.ask.proofProbability = purchasing.proofProbability - if result.expiry == 0.u256: - result.expiry = (purchasing.clock.now().u256 + 
purchasing.requestExpiryInterval) if result.nonce == Nonce.default: var id = result.nonce.toArray doAssert randomBytes(id) == 32 @@ -60,7 +56,8 @@ proc populate*(purchasing: Purchasing, result.client = await purchasing.market.getSigner() proc purchase*(purchasing: Purchasing, - request: StorageRequest): Future[Purchase] {.async.} = + request: StorageRequest + ): Future[Purchase] {.async.} = let request = await purchasing.populate(request) let purchase = Purchase.new(request, purchasing.market, purchasing.clock) purchase.start() @@ -72,3 +69,10 @@ func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase = some purchasing.purchases[id] else: none Purchase + +func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] = + var pIds: seq[PurchaseId] = @[] + for key in purchasing.purchases.keys: + pIds.add(key) + return pIds + diff --git a/codex/purchasing/purchase.nim b/codex/purchasing/purchase.nim index 2301beff..d616e492 100644 --- a/codex/purchasing/purchase.nim +++ b/codex/purchasing/purchase.nim @@ -24,30 +24,39 @@ export Purchase export purchaseid export statemachine -func new*(_: type Purchase, - requestId: RequestId, - market: Market, - clock: Clock): Purchase = - Purchase( - future: Future[void].new(), - requestId: requestId, - market: market, - clock: clock - ) +func new*( + _: type Purchase, + requestId: RequestId, + market: Market, + clock: Clock +): Purchase = + ## create a new instance of a Purchase + ## + var purchase = Purchase.new() + {.cast(noSideEffect).}: + purchase.future = newFuture[void]() + purchase.requestId = requestId + purchase.market = market + purchase.clock = clock -func new*(_: type Purchase, - request: StorageRequest, - market: Market, - clock: Clock): Purchase = + return purchase + +func new*( + _: type Purchase, + request: StorageRequest, + market: Market, + clock: Clock +): Purchase = + ## Create a new purchase using the given market and clock let purchase = Purchase.new(request.id, market, clock) purchase.request = 
some request return purchase proc start*(purchase: Purchase) = - purchase.switch(PurchasePending()) + purchase.start(PurchasePending()) proc load*(purchase: Purchase) = - purchase.switch(PurchaseUnknown()) + purchase.start(PurchaseUnknown()) proc wait*(purchase: Purchase) {.async.} = await purchase.future @@ -63,3 +72,8 @@ func error*(purchase: Purchase): ?(ref CatchableError) = some purchase.future.error else: none (ref CatchableError) + +func state*(purchase: Purchase): ?string = + proc description(state: State): string = + $state + purchase.query(description) diff --git a/codex/purchasing/purchaseid.nim b/codex/purchasing/purchaseid.nim index ee5c3a16..226fcbee 100644 --- a/codex/purchasing/purchaseid.nim +++ b/codex/purchasing/purchaseid.nim @@ -1,8 +1,12 @@ import std/hashes import pkg/nimcrypto +import ../logutils type PurchaseId* = distinct array[32, byte] +logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog +logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog + proc hash*(x: PurchaseId): Hash {.borrow.} proc `==`*(x, y: PurchaseId): bool {.borrow.} proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex diff --git a/codex/purchasing/statemachine.nim b/codex/purchasing/statemachine.nim index aab01026..de2753c3 100644 --- a/codex/purchasing/statemachine.nim +++ b/codex/purchasing/statemachine.nim @@ -1,21 +1,18 @@ -import ../utils/statemachine +import ../utils/asyncstatemachine import ../market import ../clock import ../errors export market export clock -export statemachine +export asyncstatemachine type - Purchase* = ref object of StateMachine + Purchase* = ref object of Machine future*: Future[void] market*: Market clock*: Clock requestId*: RequestId request*: ?StorageRequest - PurchaseState* = ref object of AsyncState + PurchaseState* = ref object of State PurchaseError* = object of CodexError - -method description*(state: PurchaseState): string {.base.} = - raiseAssert "description not implemented for state" diff --git 
a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index 93798adb..f9bb1ece 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -1,20 +1,25 @@ +import pkg/metrics + +import ../../logutils import ../statemachine -import ./error +import ./errorhandling -type PurchaseCancelled* = ref object of PurchaseState +declareCounter(codex_purchases_cancelled, "codex purchases cancelled") -method enterAsync*(state: PurchaseCancelled) {.async.} = - without purchase =? (state.context as Purchase): - raiseAssert "invalid state" +logScope: + topics = "marketplace purchases cancelled" - try: - await purchase.market.withdrawFunds(purchase.requestId) - except CatchableError as error: - state.switch(PurchaseErrored(error: error)) - return +type PurchaseCancelled* = ref object of ErrorHandlingState + +method `$`*(state: PurchaseCancelled): string = + "cancelled" + +method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} = + codex_purchases_cancelled.inc() + let purchase = Purchase(machine) + + warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) let error = newException(Timeout, "Purchase cancelled due to timeout") - state.switch(PurchaseErrored(error: error)) - -method description*(state: PurchaseCancelled): string = - "cancelled" + purchase.future.fail(error) diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index edddc192..0ebe1dbe 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -1,13 +1,23 @@ +import pkg/metrics import ../statemachine +import ../../utils/exceptions +import ../../logutils + +declareCounter(codex_purchases_error, "codex purchases error") + +logScope: + topics = "marketplace purchases errored" type PurchaseErrored* = ref object of PurchaseState error*: ref CatchableError -method enter*(state: 
PurchaseErrored) = - without purchase =? (state.context as Purchase): - raiseAssert "invalid state" +method `$`*(state: PurchaseErrored): string = + "errored" + +method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} = + codex_purchases_error.inc() + let purchase = Purchase(machine) + + error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId purchase.future.fail(state.error) - -method description*(state: PurchaseErrored): string = - "errored" diff --git a/codex/purchasing/states/errorhandling.nim b/codex/purchasing/states/errorhandling.nim new file mode 100644 index 00000000..57e00924 --- /dev/null +++ b/codex/purchasing/states/errorhandling.nim @@ -0,0 +1,9 @@ +import pkg/questionable +import ../statemachine +import ./error + +type + ErrorHandlingState* = ref object of PurchaseState + +method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = + some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index 7f73104f..1ade3e4c 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -1,12 +1,16 @@ +import pkg/metrics import ../statemachine import ./error +declareCounter(codex_purchases_failed, "codex purchases failed") + type PurchaseFailed* = ref object of PurchaseState -method enter*(state: PurchaseFailed) = - let error = newException(PurchaseError, "Purchase failed") - state.switch(PurchaseErrored(error: error)) - -method description*(state: PurchaseFailed): string = +method `$`*(state: PurchaseFailed): string = "failed" + +method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} = + codex_purchases_failed.inc() + let error = newException(PurchaseError, "Purchase failed") + return some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/finished.nim b/codex/purchasing/states/finished.nim index ce933207..0f97150d 100644 --- 
a/codex/purchasing/states/finished.nim +++ b/codex/purchasing/states/finished.nim @@ -1,12 +1,20 @@ +import pkg/metrics + import ../statemachine +import ../../logutils + +declareCounter(codex_purchases_finished, "codex purchases finished") + +logScope: + topics = "marketplace purchases finished" type PurchaseFinished* = ref object of PurchaseState -method enter*(state: PurchaseFinished) = - without purchase =? (state.context as Purchase): - raiseAssert "invalid state" - - purchase.future.complete() - -method description*(state: PurchaseFinished): string = +method `$`*(state: PurchaseFinished): string = "finished" + +method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} = + codex_purchases_finished.inc() + let purchase = Purchase(machine) + info "Purchase finished", requestId = purchase.requestId + purchase.future.complete() diff --git a/codex/purchasing/states/pending.nim b/codex/purchasing/states/pending.nim index 8ade593c..4852f266 100644 --- a/codex/purchasing/states/pending.nim +++ b/codex/purchasing/states/pending.nim @@ -1,21 +1,18 @@ +import pkg/metrics import ../statemachine +import ./errorhandling import ./submitted -import ./error -type PurchasePending* = ref object of PurchaseState +declareCounter(codex_purchases_pending, "codex purchases pending") -method enterAsync(state: PurchasePending) {.async.} = - without purchase =? (state.context as Purchase) and - request =? 
purchase.request: - raiseAssert "invalid state" +type PurchasePending* = ref object of ErrorHandlingState - try: - await purchase.market.requestStorage(request) - except CatchableError as error: - state.switch(PurchaseErrored(error: error)) - return - - state.switch(PurchaseSubmitted()) - -method description*(state: PurchasePending): string = +method `$`*(state: PurchasePending): string = "pending" + +method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} = + codex_purchases_pending.inc() + let purchase = Purchase(machine) + let request = !purchase.request + await purchase.market.requestStorage(request) + return some State(PurchaseSubmitted()) diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 6d134c5e..4cd1268b 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -1,32 +1,41 @@ +import pkg/metrics + +import ../../logutils import ../statemachine -import ./error +import ./errorhandling import ./finished import ./failed -type PurchaseStarted* = ref object of PurchaseState +declareCounter(codex_purchases_started, "codex purchases started") -method enterAsync*(state: PurchaseStarted) {.async.} = - without purchase =? 
(state.context as Purchase): - raiseAssert "invalid state" +logScope: + topics = "marketplace purchases started" + +type PurchaseStarted* = ref object of ErrorHandlingState + +method `$`*(state: PurchaseStarted): string = + "started" + +method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} = + codex_purchases_started.inc() + let purchase = Purchase(machine) let clock = purchase.clock let market = purchase.market + info "All required slots filled, purchase started", requestId = purchase.requestId let failed = newFuture[void]() proc callback(_: RequestId) = failed.complete() let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) - let ended = clock.waitUntil(await market.getRequestEnd(purchase.requestId)) - try: - let fut = await one(ended, failed) - if fut.id == failed.id: - state.switch(PurchaseFailed()) - else: - state.switch(PurchaseFinished()) - await subscription.unsubscribe() - except CatchableError as error: - state.switch(PurchaseErrored(error: error)) - -method description*(state: PurchaseStarted): string = - "started" + # Ensure that we're past the request end by waiting an additional second + let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) + let fut = await one(ended, failed) + await subscription.unsubscribe() + if fut.id == failed.id: + ended.cancel() + return some State(PurchaseFailed()) + else: + failed.cancel() + return some State(PurchaseFinished()) diff --git a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 9d5c8589..5532c850 100644 --- a/codex/purchasing/states/submitted.nim +++ b/codex/purchasing/states/submitted.nim @@ -1,18 +1,30 @@ +import pkg/metrics + +import ../../logutils import ../statemachine -import ./error +import ./errorhandling import ./started import ./cancelled -type PurchaseSubmitted* = ref object of PurchaseState +logScope: + topics = "marketplace purchases submitted" -method enterAsync(state: 
PurchaseSubmitted) {.async.} = - without purchase =? (state.context as Purchase) and - request =? purchase.request: - raiseAssert "invalid state" +declareCounter(codex_purchases_submitted, "codex purchases submitted") +type PurchaseSubmitted* = ref object of ErrorHandlingState + +method `$`*(state: PurchaseSubmitted): string = + "submitted" + +method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} = + codex_purchases_submitted.inc() + let purchase = Purchase(machine) + let request = !purchase.request let market = purchase.market let clock = purchase.clock + info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId + proc wait {.async.} = let done = newFuture[void]() proc callback(_: RequestId) = @@ -22,19 +34,13 @@ method enterAsync(state: PurchaseSubmitted) {.async.} = await subscription.unsubscribe() proc withTimeout(future: Future[void]) {.async.} = - let expiry = request.expiry.truncate(int64) + let expiry = (await market.requestExpiresAt(request.id)) + 1 + trace "waiting for request fulfillment or expiry", expiry await future.withTimeout(clock, expiry) try: await wait().withTimeout() except Timeout: - state.switch(PurchaseCancelled()) - return - except CatchableError as error: - state.switch(PurchaseErrored(error: error)) - return + return some State(PurchaseCancelled()) - state.switch(PurchaseStarted()) - -method description*(state: PurchaseSubmitted): string = - "submitted" + return some State(PurchaseStarted()) diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index 0102fa43..ade70c9f 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -1,37 +1,35 @@ +import pkg/metrics import ../statemachine +import ./errorhandling import ./submitted import ./started import ./cancelled import ./finished import ./failed -import ./error -type PurchaseUnknown* = ref object of PurchaseState +declareCounter(codex_purchases_unknown, "codex 
purchases unknown") -method enterAsync(state: PurchaseUnknown) {.async.} = - without purchase =? (state.context as Purchase): - raiseAssert "invalid state" +type PurchaseUnknown* = ref object of ErrorHandlingState - try: - if (request =? await purchase.market.getRequest(purchase.requestId)) and - (requestState =? await purchase.market.getState(purchase.requestId)): - - purchase.request = some request - - case requestState - of RequestState.New: - state.switch(PurchaseSubmitted()) - of RequestState.Started: - state.switch(PurchaseStarted()) - of RequestState.Cancelled: - state.switch(PurchaseCancelled()) - of RequestState.Finished: - state.switch(PurchaseFinished()) - of RequestState.Failed: - state.switch(PurchaseFailed()) - - except CatchableError as error: - state.switch(PurchaseErrored(error: error)) - -method description*(state: PurchaseUnknown): string = +method `$`*(state: PurchaseUnknown): string = "unknown" + +method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} = + codex_purchases_unknown.inc() + let purchase = Purchase(machine) + if (request =? await purchase.market.getRequest(purchase.requestId)) and + (requestState =? 
await purchase.market.requestState(purchase.requestId)): + + purchase.request = some request + + case requestState + of RequestState.New: + return some State(PurchaseSubmitted()) + of RequestState.Started: + return some State(PurchaseStarted()) + of RequestState.Cancelled: + return some State(PurchaseCancelled()) + of RequestState.Finished: + return some State(PurchaseFinished()) + of RequestState.Failed: + return some State(PurchaseFailed()) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 0fd87b43..b209e7c9 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -13,26 +13,30 @@ push: {.upraises: [].} import std/sequtils -import std/sugar import pkg/questionable import pkg/questionable/results -import pkg/chronicles import pkg/chronos -import pkg/presto -import pkg/libp2p +import pkg/presto except toJson +import pkg/metrics except toJson import pkg/stew/base10 import pkg/stew/byteutils import pkg/confutils +import pkg/libp2p import pkg/libp2p/routing_record -import pkg/libp2pdht/discv5/spr as spr +import pkg/codexdht/discv5/spr as spr +import ../logutils import ../node import ../blocktype import ../conf import ../contracts -import ../streams +import ../erasure/erasure +import ../manifest +import ../streams/asyncstreamwrapper +import ../stores +import ../utils/options import ./coders import ./json @@ -40,130 +44,91 @@ import ./json logScope: topics = "codex restapi" +declareCounter(codex_api_uploads, "codex API uploads") +declareCounter(codex_api_downloads, "codex API downloads") + proc validate( pattern: string, value: string): int {.gcsafe, raises: [Defect].} = 0 -proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter = - var router = RestRouter.init(validate) - router.api( - MethodGet, - "/api/codex/v1/connect/{peerId}") do ( - peerId: PeerID, - addrs: seq[MultiAddress]) -> RestApiResponse: - ## Connect to a peer - ## - ## If `addrs` param is supplied, it will be used to - ## dial the peer, otherwise the `peerId` is used - ## to invoke 
peer discovery, if it succeeds - ## the returned addresses will be used to dial - ## +proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} = + var content: seq[RestContent] - if peerId.isErr: - return RestApiResponse.error( - Http400, - $peerId.error()) + proc formatManifest(cid: Cid, manifest: Manifest) = + let restContent = RestContent.init(cid, manifest) + content.add(restContent) - let addresses = if addrs.isOk and addrs.get().len > 0: - addrs.get() - else: - without peerRecord =? (await node.findPeer(peerId.get())): - return RestApiResponse.error( - Http400, - "Unable to find Peer!") - peerRecord.addresses.mapIt(it.address) - try: - await node.connect(peerId.get(), addresses) - return RestApiResponse.response("Successfully connected to peer") - except DialFailedError as e: - return RestApiResponse.error(Http400, "Unable to dial peer") - except CatchableError as e: - return RestApiResponse.error(Http400, "Unknown error dialling peer") + await node.iterateManifests(formatManifest) + return %RestContentList.init(content) - router.api( - MethodGet, - "/api/codex/v1/download/{id}") do ( - id: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download a file from the node in a streaming - ## manner - ## +proc retrieveCid( + node: CodexNodeRef, + cid: Cid, + local: bool = true, + resp: HttpResponseRef): Future[RestApiResponse] {.async.} = + ## Download a file from the node in a streaming + ## manner + ## - if id.isErr: - return RestApiResponse.error( - Http400, - $id.error()) - - var - stream: LPStream - - var bytes = 0 - try: - without stream =? 
(await node.retrieve(id.get())), error: - return RestApiResponse.error(Http404, error.msg) - - resp.addHeader("Content-Type", "application/octet-stream") - await resp.prepareChunked() - - while not stream.atEof: - var - buff = newSeqUninitialized[byte](BlockSize) - len = await stream.readOnce(addr buff[0], buff.len) - - buff.setLen(len) - if buff.len <= 0: - break - - bytes += buff.len - trace "Sending chunk", size = buff.len - await resp.sendChunk(addr buff[0], buff.len) - await resp.finish() - except CatchableError as exc: - trace "Excepting streaming blocks", exc = exc.msg - return RestApiResponse.error(Http500) - finally: - trace "Sent bytes", cid = id.get(), bytes - if not stream.isNil: - await stream.close() - - router.rawApi( - MethodPost, - "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse: - ## Create a request for storage - ## - ## cid - the cid of a previously uploaded dataset - ## duration - the duration of the contract - ## reward - the maximum price the client is willing to pay - - without cid =? cid.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg) - - let body = await request.getBody() - - without params =? StorageRequestParams.fromJson(body), error: - return RestApiResponse.error(Http400, error.msg) - - let nodes = params.nodes |? 1 - let tolerance = params.nodes |? 0 - - without purchaseId =? await node.requestStorage( - cid, - params.duration, - nodes, - tolerance, - params.reward, - params.expiry), error: + var + stream: LPStream + var bytes = 0 + try: + without stream =? 
(await node.retrieve(cid, local)), error: + if error of BlockNotFoundError: + return RestApiResponse.error(Http404, error.msg) + else: return RestApiResponse.error(Http500, error.msg) - return RestApiResponse.response(purchaseId.toHex) + resp.addHeader("Content-Type", "application/octet-stream") + await resp.prepareChunked() + + while not stream.atEof: + var + buff = newSeqUninitialized[byte](DefaultBlockSize.int) + len = await stream.readOnce(addr buff[0], buff.len) + + buff.setLen(len) + if buff.len <= 0: + break + + bytes += buff.len + await resp.sendChunk(addr buff[0], buff.len) + await resp.finish() + codex_api_downloads.inc() + except CatchableError as exc: + warn "Excepting streaming blocks", exc = exc.msg + return RestApiResponse.error(Http500) + finally: + info "Sent bytes", cid = cid, bytes + if not stream.isNil: + await stream.close() + +proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) = + let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion + + router.api( + MethodOptions, + "/api/codex/v1/data") do ( + resp: HttpResponseRef) -> RestApiResponse: + + if corsOrigin =? 
allowedOrigin: + resp.setHeader("Access-Control-Allow-Origin", corsOrigin) + resp.setHeader("Access-Control-Allow-Methods", "POST, OPTIONS") + resp.setHeader("Access-Control-Allow-Headers", "content-type") + resp.setHeader("Access-Control-Max-Age", "86400") + + resp.status = Http204 + await resp.sendBody("") router.rawApi( MethodPost, - "/api/codex/v1/upload") do ( + "/api/codex/v1/data") do ( ) -> RestApiResponse: - ## Upload a file in a streamming manner + ## Upload a file in a streaming manner ## trace "Handling file upload" @@ -186,18 +151,487 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter = trace "Error uploading file", exc = error.msg return RestApiResponse.error(Http500, error.msg) + codex_api_uploads.inc() trace "Uploaded file", cid return RestApiResponse.response($cid) - except CancelledError as exc: + except CancelledError: + trace "Upload cancelled error" return RestApiResponse.error(Http500) except AsyncStreamError: + trace "Async stream error" return RestApiResponse.error(Http500) finally: await reader.closeWait() - # if we got here something went wrong? + trace "Something went wrong error" return RestApiResponse.error(Http500) + router.api( + MethodGet, + "/api/codex/v1/data") do () -> RestApiResponse: + let json = await formatManifestBlocks(node) + return RestApiResponse.response($json, contentType="application/json") + + router.api( + MethodGet, + "/api/codex/v1/data/{cid}") do ( + cid: Cid, resp: HttpResponseRef) -> RestApiResponse: + ## Download a file from the local node in a streaming + ## manner + if cid.isErr: + return RestApiResponse.error( + Http400, + $cid.error()) + + if corsOrigin =? 
allowedOrigin: + resp.setHeader("Access-Control-Allow-Origin", corsOrigin) + resp.setHeader("Access-Control-Allow-Methods", "GET, OPTIONS") + resp.setHeader("Access-Control-Headers", "X-Requested-With") + resp.setHeader("Access-Control-Max-Age", "86400") + + await node.retrieveCid(cid.get(), local = true, resp=resp) + + router.api( + MethodGet, + "/api/codex/v1/data/{cid}/network") do ( + cid: Cid, resp: HttpResponseRef) -> RestApiResponse: + ## Download a file from the network in a streaming + ## manner + ## + + if cid.isErr: + return RestApiResponse.error( + Http400, + $cid.error()) + + if corsOrigin =? allowedOrigin: + resp.setHeader("Access-Control-Allow-Origin", corsOrigin) + resp.setHeader("Access-Control-Allow-Methods", "GET, OPTIONS") + resp.setHeader("Access-Control-Headers", "X-Requested-With") + resp.setHeader("Access-Control-Max-Age", "86400") + + await node.retrieveCid(cid.get(), local = false, resp=resp) + + router.api( + MethodGet, + "/api/codex/v1/space") do () -> RestApiResponse: + let json = % RestRepoStore( + totalBlocks: repoStore.totalBlocks, + quotaMaxBytes: repoStore.quotaMaxBytes, + quotaUsedBytes: repoStore.quotaUsedBytes, + quotaReservedBytes: repoStore.quotaReservedBytes + ) + return RestApiResponse.response($json, contentType="application/json") + +proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = + router.api( + MethodGet, + "/api/codex/v1/sales/slots") do () -> RestApiResponse: + ## Returns active slots for the host + try: + without contracts =? 
node.contracts.host: + return RestApiResponse.error(Http503, "Sales unavailable") + + let json = %(await contracts.sales.mySlots()) + return RestApiResponse.response($json, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.api( + MethodGet, + "/api/codex/v1/sales/slots/{slotId}") do (slotId: SlotId) -> RestApiResponse: + ## Returns active slot with id {slotId} for the host. Returns 404 if the + ## slot is not active for the host. + + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Sales unavailable") + + without slotId =? slotId.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + + without agent =? await contracts.sales.activeSale(slotId): + return RestApiResponse.error(Http404, "Provider not filling slot") + + let restAgent = RestSalesAgent( + state: agent.state() |? "none", + slotIndex: agent.data.slotIndex, + requestId: agent.data.requestId + ) + + return RestApiResponse.response(restAgent.toJson, contentType="application/json") + + router.api( + MethodGet, + "/api/codex/v1/sales/availability") do () -> RestApiResponse: + ## Returns storage that is for sale + + try: + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Sales unavailable") + + without avails =? (await contracts.sales.context.reservations.all(Availability)), err: + return RestApiResponse.error(Http500, err.msg) + + let json = %avails + return RestApiResponse.response($json, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.rawApi( + MethodPost, + "/api/codex/v1/sales/availability") do () -> RestApiResponse: + ## Add available storage to sell. + ## Every time Availability's offer finishes, its capacity is returned to the availability. 
+ ## + ## totalSize - size of available storage in bytes + ## duration - maximum time the storage should be sold for (in seconds) + ## minPrice - minimum price to be paid (in amount of tokens) + ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens) + + try: + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Sales unavailable") + + let body = await request.getBody() + + without restAv =? RestAvailability.fromJson(body), error: + return RestApiResponse.error(Http400, error.msg) + + let reservations = contracts.sales.context.reservations + + if restAv.totalSize == 0: + return RestApiResponse.error(Http400, "Total size must be larger then zero") + + if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): + return RestApiResponse.error(Http422, "Not enough storage quota") + + without availability =? ( + await reservations.createAvailability( + restAv.totalSize, + restAv.duration, + restAv.minPrice, + restAv.maxCollateral) + ), error: + return RestApiResponse.error(Http500, error.msg) + + return RestApiResponse.response(availability.toJson, + Http201, + contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.rawApi( + MethodPatch, + "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId) -> RestApiResponse: + ## Updates Availability. + ## The new parameters will be only considered for new requests. + ## Existing Requests linked to this Availability will continue as is. + ## + ## totalSize - size of available storage in bytes. When decreasing the size, then lower limit is the currently `totalSize - freeSize`. 
+ ## duration - maximum time the storage should be sold for (in seconds) + ## minPrice - minimum price to be paid (in amount of tokens) + ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens) + + try: + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Sales unavailable") + + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + without keyId =? id.key.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + + let + body = await request.getBody() + reservations = contracts.sales.context.reservations + + type OptRestAvailability = Optionalize(RestAvailability) + without restAv =? OptRestAvailability.fromJson(body), error: + return RestApiResponse.error(Http400, error.msg) + + without availability =? (await reservations.get(keyId, Availability)), error: + if error of NotExistsError: + return RestApiResponse.error(Http404, "Availability not found") + + return RestApiResponse.error(Http500, error.msg) + + if isSome restAv.freeSize: + return RestApiResponse.error(Http400, "Updating freeSize is not allowed") + + if size =? restAv.totalSize: + # we don't allow lowering the totalSize bellow currently utilized size + if size < (availability.totalSize - availability.freeSize): + return RestApiResponse.error(Http400, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize)) + + availability.freeSize += size - availability.totalSize + availability.totalSize = size + + if duration =? restAv.duration: + availability.duration = duration + + if minPrice =? restAv.minPrice: + availability.minPrice = minPrice + + if maxCollateral =? restAv.maxCollateral: + availability.maxCollateral = maxCollateral + + if err =? 
(await reservations.update(availability)).errorOption: + return RestApiResponse.error(Http500, err.msg) + + return RestApiResponse.response(Http200) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.rawApi( + MethodGet, + "/api/codex/v1/sales/availability/{id}/reservations") do (id: AvailabilityId) -> RestApiResponse: + ## Gets Availability's reservations. + + try: + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Sales unavailable") + + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + without keyId =? id.key.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + + let reservations = contracts.sales.context.reservations + + if error =? (await reservations.get(keyId, Availability)).errorOption: + if error of NotExistsError: + return RestApiResponse.error(Http404, "Availability not found") + else: + return RestApiResponse.error(Http500, error.msg) + + without availabilitysReservations =? 
(await reservations.all(Reservation, id)), err: + return RestApiResponse.error(Http500, err.msg) + + # TODO: Expand this structure with information about the linked StorageRequest not only RequestID + return RestApiResponse.response(availabilitysReservations.toJson, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + +proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = + router.rawApi( + MethodPost, + "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse: + ## Create a request for storage + ## + ## cid - the cid of a previously uploaded dataset + ## duration - the duration of the request in seconds + ## proofProbability - how often storage proofs are required + ## reward - the maximum amount of tokens paid per second per slot to hosts the client is willing to pay + ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data + ## nodes - number of nodes the content should be stored on + ## tolerance - allowed number of nodes that can be lost before content is lost + ## colateral - requested collateral from hosts when they fill slot + + try: + without contracts =? node.contracts.client: + return RestApiResponse.error(Http503, "Purchasing unavailable") + + without cid =? cid.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + + let body = await request.getBody() + + without params =? StorageRequestParams.fromJson(body), error: + return RestApiResponse.error(Http400, error.msg) + + let nodes = params.nodes |? 1 + let tolerance = params.tolerance |? 
0 + + # prevent underflow + if tolerance > nodes: + return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`") + + let ecK = nodes - tolerance + let ecM = tolerance # for readability + + # ensure leopard constrainst of 1 < K ≥ M + if ecK <= 1 or ecK < ecM: + return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`") + + without expiry =? params.expiry: + return RestApiResponse.error(Http400, "Expiry required") + + if expiry <= 0 or expiry >= params.duration: + return RestApiResponse.error(Http400, "Expiry needs value bigger then zero and smaller then the request's duration") + + without purchaseId =? await node.requestStorage( + cid, + params.duration, + params.proofProbability, + nodes, + tolerance, + params.reward, + params.collateral, + expiry), error: + + if error of InsufficientBlocksError: + return RestApiResponse.error(Http400, + "Dataset too small for erasure parameters, need at least " & + $(ref InsufficientBlocksError)(error).minSize.int & " bytes") + + return RestApiResponse.error(Http500, error.msg) + + return RestApiResponse.response(purchaseId.toHex) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.api( + MethodGet, + "/api/codex/v1/storage/purchases/{id}") do ( + id: PurchaseId) -> RestApiResponse: + + try: + without contracts =? node.contracts.client: + return RestApiResponse.error(Http503, "Purchasing unavailable") + + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + + without purchase =? contracts.purchasing.getPurchase(id): + return RestApiResponse.error(Http404) + + let json = % RestPurchase( + state: purchase.state |? 
"none", + error: purchase.error.?msg, + request: purchase.request, + requestId: purchase.requestId + ) + + return RestApiResponse.response($json, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.api( + MethodGet, + "/api/codex/v1/storage/purchases") do () -> RestApiResponse: + try: + without contracts =? node.contracts.client: + return RestApiResponse.error(Http503, "Purchasing unavailable") + + let purchaseIds = contracts.purchasing.getPurchaseIds() + return RestApiResponse.response($ %purchaseIds, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + +proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = + ## various node management api's + ## + router.api( + MethodGet, + "/api/codex/v1/spr") do () -> RestApiResponse: + ## Returns node SPR in requested format, json or text. + ## + try: + without spr =? node.discovery.dhtRecord: + return RestApiResponse.response("", status=Http503, contentType="application/json") + + if $preferredContentType().get() == "text/plain": + return RestApiResponse.response(spr.toURI, contentType="text/plain") + else: + return RestApiResponse.response($ %* {"spr": spr.toURI}, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.api( + MethodGet, + "/api/codex/v1/peerid") do () -> RestApiResponse: + ## Returns node's peerId in requested format, json or text. 
+ ## + try: + let id = $node.switch.peerInfo.peerId + + if $preferredContentType().get() == "text/plain": + return RestApiResponse.response(id, contentType="text/plain") + else: + return RestApiResponse.response($ %* {"id": id}, contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + router.api( + MethodGet, + "/api/codex/v1/connect/{peerId}") do ( + peerId: PeerId, + addrs: seq[MultiAddress]) -> RestApiResponse: + ## Connect to a peer + ## + ## If `addrs` param is supplied, it will be used to + ## dial the peer, otherwise the `peerId` is used + ## to invoke peer discovery, if it succeeds + ## the returned addresses will be used to dial + ## + ## `addrs` the listening addresses of the peers to dial, eg the one specified with `--listen-addrs` + ## + + if peerId.isErr: + return RestApiResponse.error( + Http400, + $peerId.error()) + + let addresses = if addrs.isOk and addrs.get().len > 0: + addrs.get() + else: + without peerRecord =? 
(await node.findPeer(peerId.get())): + return RestApiResponse.error( + Http400, + "Unable to find Peer!") + peerRecord.addresses.mapIt(it.address) + try: + await node.connect(peerId.get(), addresses) + return RestApiResponse.response("Successfully connected to peer") + except DialFailedError: + return RestApiResponse.error(Http400, "Unable to dial peer") + except CatchableError: + return RestApiResponse.error(Http500, "Unknown error dialling peer") + +proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = + router.api( + MethodGet, + "/api/codex/v1/debug/info") do () -> RestApiResponse: + ## Print rudimentary node information + ## + + try: + let table = RestRoutingTable.init(node.discovery.protocol.routingTable) + + let + json = %*{ + "id": $node.switch.peerInfo.peerId, + "addrs": node.switch.peerInfo.addrs.mapIt( $it ), + "repo": $conf.dataDir, + "spr": + if node.discovery.dhtRecord.isSome: + node.discovery.dhtRecord.get.toURI + else: + "", + "announceAddresses": node.discovery.announceAddrs, + "table": table, + "codex": { + "version": $codexVersion, + "revision": $codexRevision + } + } + + # return pretty json for human readability + return RestApiResponse.response(json.pretty(), contentType="application/json") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + router.api( MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do ( @@ -209,87 +643,53 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter = ## `level` - chronicles log level ## - without res =? level and level =? res: - return RestApiResponse.error(Http400, "Missing log level") + try: + without res =? level and level =? 
res: + return RestApiResponse.error(Http400, "Missing log level") + + try: + {.gcsafe.}: + updateLogLevel(level) + except CatchableError as exc: + return RestApiResponse.error(Http500, exc.msg) + + return RestApiResponse.response("") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) + + when codex_enable_api_debug_peers: + router.api( + MethodGet, + "/api/codex/v1/debug/peer/{peerId}") do (peerId: PeerId) -> RestApiResponse: try: - {.gcsafe.}: - updateLogLevel(level) + trace "debug/peer start" + without peerRecord =? (await node.findPeer(peerId.get())): + trace "debug/peer peer not found!" + return RestApiResponse.error( + Http400, + "Unable to find Peer!") + + let json = %RestPeerRecord.init(peerRecord) + trace "debug/peer returning peer record" + return RestApiResponse.response($json) except CatchableError as exc: - return RestApiResponse.error(Http500, exc.msg) + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) - return RestApiResponse.response("") +proc initRestApi*( + node: CodexNodeRef, + conf: CodexConf, + repoStore: RepoStore, + corsAllowedOrigin: ?string): RestRouter = - router.api( - MethodGet, - "/api/codex/v1/debug/info") do () -> RestApiResponse: - ## Print rudimentary node information - ## - - let - json = %*{ - "id": $node.switch.peerInfo.peerId, - "addrs": node.switch.peerInfo.addrs.mapIt( $it ), - "repo": $conf.dataDir, - "spr": - if node.discovery.dhtRecord.isSome: - node.discovery.dhtRecord.get.toURI - else: - "" - } - - return RestApiResponse.response($json) - - router.api( - MethodGet, - "/api/codex/v1/sales/availability") do () -> RestApiResponse: - ## Returns storage that is for sale - - without contracts =? 
node.contracts: - return RestApiResponse.error(Http503, "Sales unavailable") - - let json = %contracts.sales.available - return RestApiResponse.response($json) - - router.rawApi( - MethodPost, - "/api/codex/v1/sales/availability") do () -> RestApiResponse: - ## Add available storage to sell - ## - ## size - size of available storage in bytes - ## duration - maximum time the storage should be sold for (in seconds) - ## minPrice - minimum price to be paid (in amount of tokens) - - without contracts =? node.contracts: - return RestApiResponse.error(Http503, "Sales unavailable") - - let body = await request.getBody() - - without availability =? Availability.fromJson(body), error: - return RestApiResponse.error(Http400, error.msg) - - contracts.sales.add(availability) - - let json = %availability - return RestApiResponse.response($json) - - router.api( - MethodGet, - "/api/codex/v1/storage/purchases/{id}") do ( - id: PurchaseId) -> RestApiResponse: - - without contracts =? node.contracts: - return RestApiResponse.error(Http503, "Purchasing unavailable") - - without id =? id.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg) - - without purchase =? 
contracts.purchasing.getPurchase(id): - return RestApiResponse.error(Http404) - - let json = %purchase - - return RestApiResponse.response($json) + var router = RestRouter.init(validate, corsAllowedOrigin) + initDataApi(node, repoStore, router) + initSalesApi(node, router) + initPurchasingApi(node, router) + initNodeApi(node, conf, router) + initDebugApi(node, conf, router) return router diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim index cef76a38..0be1a638 100644 --- a/codex/rest/coders.nim +++ b/codex/rest/coders.nim @@ -19,6 +19,7 @@ import pkg/stint import ../sales import ../purchasing +import ../utils/stintutils proc encodeString*(cid: type Cid): Result[string, cstring] = ok($cid) @@ -37,7 +38,7 @@ proc encodeString*(peerId: PeerId): Result[string, cstring] = ok($peerId) proc decodeString*(T: type PeerId, value: string): Result[PeerId, cstring] = - PeerID.init(value) + PeerId.init(value) proc encodeString*(address: MultiAddress): Result[string, cstring] = ok($address) @@ -72,7 +73,7 @@ proc encodeString*(value: bool): Result[string, cstring] = proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] = try: - ok UInt256.fromHex(value) + ok UInt256.fromDecimal(value) except ValueError as e: err e.msg.cstring @@ -83,7 +84,7 @@ proc decodeString*(_: type array[32, byte], except ValueError as e: err e.msg.cstring -proc decodeString*[T: PurchaseId | RequestId | Nonce](_: type T, +proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T, value: string): Result[T, cstring] = array[32, byte].decodeString(value).map(id => T(id)) diff --git a/codex/rest/json.nim b/codex/rest/json.nim index 2add0e94..fba708be 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -1,56 +1,124 @@ -import std/json -import std/strutils +import pkg/questionable import pkg/stew/byteutils -import pkg/questionable/results +import pkg/libp2p +import pkg/codexdht/discv5/node as dn +import pkg/codexdht/discv5/routing_table as 
rt import ../sales import ../purchasing +import ../utils/json +import ../manifest +import ../units + +export json type StorageRequestParams* = object - duration*: UInt256 - reward*: UInt256 - expiry*: ?UInt256 - nodes*: ?uint - tolerance*: ?uint + duration* {.serialize.}: UInt256 + proofProbability* {.serialize.}: UInt256 + reward* {.serialize.}: UInt256 + collateral* {.serialize.}: UInt256 + expiry* {.serialize.}: ?UInt256 + nodes* {.serialize.}: ?uint + tolerance* {.serialize.}: ?uint -proc fromJson*(_: type Availability, bytes: seq[byte]): ?!Availability = - let json = ?catch parseJson(string.fromBytes(bytes)) - let size = ?catch UInt256.fromHex(json["size"].getStr) - let duration = ?catch UInt256.fromHex(json["duration"].getStr) - let minPrice = ?catch UInt256.fromHex(json["minPrice"].getStr) - success Availability.init(size, duration, minPrice) + RestPurchase* = object + requestId* {.serialize.}: RequestId + request* {.serialize.}: ?StorageRequest + state* {.serialize.}: string + error* {.serialize.}: ?string -proc fromJson*(_: type StorageRequestParams, - bytes: seq[byte]): ?! 
StorageRequestParams = - let json = ?catch parseJson(string.fromBytes(bytes)) - let duration = ?catch UInt256.fromHex(json["duration"].getStr) - let reward = ?catch UInt256.fromHex(json["reward"].getStr) - let expiry = UInt256.fromHex(json["expiry"].getStr).catch.option - let nodes = strutils.fromHex[uint](json["nodes"].getStr).catch.option - let tolerance = strutils.fromHex[uint](json["tolerance"].getStr).catch.option - success StorageRequestParams( - duration: duration, - reward: reward, - expiry: expiry, - nodes: nodes, - tolerance: tolerance + RestAvailability* = object + totalSize* {.serialize.}: UInt256 + duration* {.serialize.}: UInt256 + minPrice* {.serialize.}: UInt256 + maxCollateral* {.serialize.}: UInt256 + freeSize* {.serialize.}: ?UInt256 + + RestSalesAgent* = object + state* {.serialize.}: string + requestId* {.serialize.}: RequestId + slotIndex* {.serialize.}: UInt256 + + RestContent* = object + cid* {.serialize.}: Cid + manifest* {.serialize.}: Manifest + + RestContentList* = object + content* {.serialize.}: seq[RestContent] + + RestNode* = object + nodeId* {.serialize.}: RestNodeId + peerId* {.serialize.}: PeerId + record* {.serialize.}: SignedPeerRecord + address* {.serialize.}: Option[dn.Address] + seen* {.serialize.}: bool + + RestRoutingTable* = object + localNode* {.serialize.}: RestNode + nodes* {.serialize.}: seq[RestNode] + + RestPeerRecord* = object + peerId* {.serialize.}: PeerId + seqNo* {.serialize.}: uint64 + addresses* {.serialize.}: seq[AddressInfo] + + RestNodeId* = object + id*: NodeId + + RestRepoStore* = object + totalBlocks* {.serialize.}: Natural + quotaMaxBytes* {.serialize.}: NBytes + quotaUsedBytes* {.serialize.}: NBytes + quotaReservedBytes* {.serialize.}: NBytes + +proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList = + RestContentList( + content: content ) -func `%`*(address: Address): JsonNode = - % $address +proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent = + 
RestContent( + cid: cid, + manifest: manifest + ) -func `%`*(stint: StInt|StUInt): JsonNode = - %("0x" & stint.toHex) +proc init*(_: type RestNode, node: dn.Node): RestNode = + RestNode( + nodeId: RestNodeId.init(node.id), + peerId: node.record.data.peerId, + record: node.record, + address: node.address, + seen: node.seen + ) -func `%`*(arr: openArray[byte]): JsonNode = - %("0x" & arr.toHex) +proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable = + var nodes: seq[RestNode] = @[] + for bucket in routingTable.buckets: + for node in bucket.nodes: + nodes.add(RestNode.init(node)) -func `%`*(id: RequestId | SlotId | Nonce): JsonNode = - % id.toArray + RestRoutingTable( + localNode: RestNode.init(routingTable.localNode), + nodes: nodes + ) -func `%`*(purchase: Purchase): JsonNode = - %*{ - "state": (purchase.state as PurchaseState).?description |? "none", - "error": purchase.error.?msg, - "request": purchase.request, - } +proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord = + RestPeerRecord( + peerId: peerRecord.peerId, + seqNo: peerRecord.seqNo, + addresses: peerRecord.addresses + ) + +proc init*(_: type RestNodeId, id: NodeId): RestNodeId = + RestNodeId( + id: id + ) + +proc `%`*(obj: StorageRequest | Slot): JsonNode = + let jsonObj = newJObject() + for k, v in obj.fieldPairs: jsonObj[k] = %v + jsonObj["id"] = %(obj.id) + + return jsonObj + +proc `%`*(obj: RestNodeId): JsonNode = % $obj.id diff --git a/codex/sales.nim b/codex/sales.nim index 4758e1a1..c4fcb217 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -1,14 +1,23 @@ import std/sequtils +import std/sugar import pkg/questionable -import pkg/upraises +import pkg/questionable/results import pkg/stint -import pkg/nimcrypto -import pkg/chronicles -import ./rng +import pkg/datastore import ./market import ./clock -import ./proving +import ./stores import ./contracts/requests +import ./contracts/marketplace +import ./logutils +import ./sales/salescontext 
+import ./sales/salesagent +import ./sales/statemachine +import ./sales/slotqueue +import ./sales/states/preparing +import ./sales/states/unknown +import ./utils/then +import ./utils/trackedfutures ## Sales holds a list of available storage that it may sell. ## @@ -29,214 +38,476 @@ import ./contracts/requests ## | | ---- storage proof ---> | export stint +export reservations +export salesagent +export salescontext + +logScope: + topics = "sales marketplace" type Sales* = ref object - market: Market - clock: Clock - subscription: ?market.Subscription - available*: seq[Availability] - onStore: ?OnStore - onProve: ?OnProve - onClear: ?OnClear - onSale: ?OnSale - proving: Proving - Availability* = object - id*: array[32, byte] - size*: UInt256 - duration*: UInt256 - minPrice*: UInt256 - SalesAgent = ref object - sales: Sales - requestId: RequestId - ask: StorageAsk - availability: Availability - request: ?StorageRequest - slotIndex: ?UInt256 - subscription: ?market.Subscription - running: ?Future[void] - waiting: ?Future[void] - finished: bool - OnStore = proc(request: StorageRequest, - slot: UInt256, - availability: Availability): Future[void] {.gcsafe, upraises: [].} - OnProve = proc(request: StorageRequest, - slot: UInt256): Future[seq[byte]] {.gcsafe, upraises: [].} - OnClear = proc(availability: Availability, - request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSale = proc(availability: Availability, - request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} - -func new*(_: type Sales, - market: Market, - clock: Clock, - proving: Proving): Sales = - Sales( - market: market, - clock: clock, - proving: proving - ) - -proc init*(_: type Availability, - size: UInt256, - duration: UInt256, - minPrice: UInt256): Availability = - var id: array[32, byte] - doAssert randomBytes(id) == 32 - Availability(id: id, size: size, duration: duration, minPrice: minPrice) + context*: SalesContext + agents*: seq[SalesAgent] + running: bool + 
subscriptions: seq[market.Subscription] + trackedFutures: TrackedFutures proc `onStore=`*(sales: Sales, onStore: OnStore) = - sales.onStore = some onStore - -proc `onProve=`*(sales: Sales, onProve: OnProve) = - sales.onProve = some onProve + sales.context.onStore = some onStore proc `onClear=`*(sales: Sales, onClear: OnClear) = - sales.onClear = some onClear + sales.context.onClear = some onClear proc `onSale=`*(sales: Sales, callback: OnSale) = - sales.onSale = some callback + sales.context.onSale = some callback -func add*(sales: Sales, availability: Availability) = - sales.available.add(availability) +proc `onProve=`*(sales: Sales, callback: OnProve) = + sales.context.onProve = some callback -func remove*(sales: Sales, availability: Availability) = - sales.available.keepItIf(it != availability) +proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) = + sales.context.onExpiryUpdate = some callback -func findAvailability(sales: Sales, ask: StorageAsk): ?Availability = - for availability in sales.available: - if ask.slotSize <= availability.size and - ask.duration <= availability.duration and - ask.pricePerSlot >= availability.minPrice: - return some availability +proc onStore*(sales: Sales): ?OnStore = sales.context.onStore -proc finish(agent: SalesAgent, success: bool) = - if agent.finished: - return +proc onClear*(sales: Sales): ?OnClear = sales.context.onClear - agent.finished = true +proc onSale*(sales: Sales): ?OnSale = sales.context.onSale - if subscription =? agent.subscription: - asyncSpawn subscription.unsubscribe() +proc onProve*(sales: Sales): ?OnProve = sales.context.onProve - if running =? agent.running: - running.cancel() +proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate - if waiting =? agent.waiting: - waiting.cancel() +proc new*(_: type Sales, + market: Market, + clock: Clock, + repo: RepoStore): Sales = + Sales.new(market, clock, repo, 0) - if success: - if request =? agent.request and - slotIndex =? 
agent.slotIndex: - agent.sales.proving.add(request.slotId(slotIndex)) +proc new*(_: type Sales, + market: Market, + clock: Clock, + repo: RepoStore, + simulateProofFailures: int): Sales = - if onSale =? agent.sales.onSale: - onSale(agent.availability, request, slotIndex) - else: - if onClear =? agent.sales.onClear and - request =? agent.request and - slotIndex =? agent.slotIndex: - onClear(agent.availability, request, slotIndex) - agent.sales.add(agent.availability) - -proc selectSlot(agent: SalesAgent) = - let rng = Rng.instance - let slotIndex = rng.rand(agent.ask.slots - 1) - agent.slotIndex = some slotIndex.u256 - -proc onSlotFilled(agent: SalesAgent, - requestId: RequestId, - slotIndex: UInt256) {.async.} = - try: - let market = agent.sales.market - let host = await market.getHost(requestId, slotIndex) - let me = await market.getSigner() - agent.finish(success = (host == me.some)) - except CatchableError: - agent.finish(success = false) - -proc subscribeSlotFilled(agent: SalesAgent, slotIndex: UInt256) {.async.} = - proc onSlotFilled(requestId: RequestId, - slotIndex: UInt256) {.gcsafe, upraises:[].} = - asyncSpawn agent.onSlotFilled(requestId, slotIndex) - let market = agent.sales.market - let subscription = await market.subscribeSlotFilled(agent.requestId, - slotIndex, - onSlotFilled) - agent.subscription = some subscription - -proc waitForExpiry(agent: SalesAgent) {.async.} = - without request =? agent.request: - return - await agent.sales.clock.waitUntil(request.expiry.truncate(int64)) - agent.finish(success = false) - -proc start(agent: SalesAgent) {.async.} = - try: - let sales = agent.sales - let market = sales.market - let availability = agent.availability - - without onStore =? sales.onStore: - raiseAssert "onStore callback not set" - - without onProve =? sales.onProve: - raiseAssert "onProve callback not set" - - sales.remove(availability) - - agent.selectSlot() - without slotIndex =? 
agent.slotIndex: - raiseAssert "no slot selected" - - await agent.subscribeSlotFilled(slotIndex) - - agent.request = await market.getRequest(agent.requestId) - without request =? agent.request: - agent.finish(success = false) - return - - agent.waiting = some agent.waitForExpiry() - - await onStore(request, slotIndex, availability) - let proof = await onProve(request, slotIndex) - await market.fillSlot(request.id, slotIndex, proof) - except CancelledError: - raise - except CatchableError as e: - error "SalesAgent failed", msg = e.msg - agent.finish(success = false) - -proc handleRequest(sales: Sales, requestId: RequestId, ask: StorageAsk) = - without availability =? sales.findAvailability(ask): - return - - let agent = SalesAgent( - sales: sales, - requestId: requestId, - ask: ask, - availability: availability + let reservations = Reservations.new(repo) + Sales( + context: SalesContext( + market: market, + clock: clock, + reservations: reservations, + slotQueue: SlotQueue.new(), + simulateProofFailures: simulateProofFailures + ), + trackedFutures: TrackedFutures.new(), + subscriptions: @[] ) - agent.running = some agent.start() +proc remove(sales: Sales, agent: SalesAgent) {.async.} = + await agent.stop() + if sales.running: + sales.agents.keepItIf(it != agent) -proc start*(sales: Sales) {.async.} = - doAssert sales.subscription.isNone, "Sales already started" +proc cleanUp(sales: Sales, + agent: SalesAgent, + returnBytes: bool, + reprocessSlot: bool, + processing: Future[void]) {.async.} = - proc onRequest(requestId: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].} = - sales.handleRequest(requestId, ask) + let data = agent.data + + logScope: + topics = "sales cleanUp" + requestId = data.requestId + slotIndex = data.slotIndex + reservationId = data.reservation.?id |? ReservationId.default + availabilityId = data.reservation.?availabilityId |? 
AvailabilityId.default + + trace "cleaning up sales agent" + + # if reservation for the SalesAgent was not created, then it means + # that the cleanUp was called before the sales process really started, so + # there are not really any bytes to be returned + if returnBytes and request =? data.request and reservation =? data.reservation: + if returnErr =? (await sales.context.reservations.returnBytesToAvailability( + reservation.availabilityId, + reservation.id, + request.ask.slotSize + )).errorOption: + error "failure returning bytes", + error = returnErr.msg, + bytes = request.ask.slotSize + + # delete reservation and return reservation bytes back to the availability + if reservation =? data.reservation and + deleteErr =? (await sales.context.reservations.deleteReservation( + reservation.id, + reservation.availabilityId + )).errorOption: + error "failure deleting reservation", error = deleteErr.msg + + # Re-add items back into the queue to prevent small availabilities from + # draining the queue. Seen items will be ordered last. + if reprocessSlot and request =? data.request: + let queue = sales.context.slotQueue + var seenItem = SlotQueueItem.init(data.requestId, + data.slotIndex.truncate(uint16), + data.ask, + request.expiry, + seen = true) + trace "pushing ignored item to queue, marked as seen" + if err =? queue.push(seenItem).errorOption: + error "failed to readd slot to queue", + errorType = $(type err), error = err.msg + + await sales.remove(agent) + + # signal back to the slot queue to cycle a worker + if not processing.isNil and not processing.finished(): + processing.complete() + +proc filled( + sales: Sales, + request: StorageRequest, + slotIndex: UInt256, + processing: Future[void]) = + + if onSale =? 
sales.context.onSale: + onSale(request, slotIndex) + + # signal back to the slot queue to cycle a worker + if not processing.isNil and not processing.finished(): + processing.complete() + +proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = + debug "processing slot from queue", requestId = item.requestId, + slot = item.slotIndex + + let agent = newSalesAgent( + sales.context, + item.requestId, + item.slotIndex.u256, + none StorageRequest + ) + + agent.onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = + await sales.cleanUp(agent, returnBytes, reprocessSlot, done) + + agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) = + sales.filled(request, slotIndex, done) + + agent.start(SalePreparing()) + sales.agents.add agent + +proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} = + let reservations = sales.context.reservations + without reservs =? await reservations.all(Reservation): + info "no unused reservations found for deletion" + + let unused = reservs.filter(r => ( + let slotId = slotId(r.requestId, r.slotIndex) + not activeSlots.any(slot => slot.id == slotId) + )) + info "found unused reservations for deletion", unused = unused.len + + for reservation in unused: + + logScope: + reservationId = reservation.id + availabilityId = reservation.availabilityId + + if err =? (await reservations.deleteReservation( + reservation.id, reservation.availabilityId + )).errorOption: + error "failed to delete unused reservation", error = err.msg + else: + trace "deleted unused reservation" + +proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} = + let market = sales.context.market + let slotIds = await market.mySlots() + var slots: seq[Slot] = @[] + + info "Loading active slots", slotsCount = len(slots) + for slotId in slotIds: + if slot =? 
(await market.getActiveSlot(slotId)): + slots.add slot + + return slots + +proc activeSale*(sales: Sales, slotId: SlotId): Future[?SalesAgent] {.async.} = + for agent in sales.agents: + if slotId(agent.data.requestId, agent.data.slotIndex) == slotId: + return some agent + + return none SalesAgent + +proc load*(sales: Sales) {.async.} = + let activeSlots = await sales.mySlots() + + await sales.deleteInactiveReservations(activeSlots) + + for slot in activeSlots: + let agent = newSalesAgent( + sales.context, + slot.request.id, + slot.slotIndex, + some slot.request) + + agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} = + # since workers are not being dispatched, this future has not been created + # by a worker. Create a dummy one here so we can call sales.cleanUp + let done: Future[void] = nil + await sales.cleanUp(agent, returnBytes, reprocessSlot, done) + + # There is no need to assign agent.onFilled as slots loaded from `mySlots` + # are inherently already filled and so assigning agent.onFilled would be + # superfluous. + + agent.start(SaleUnknown()) + sales.agents.add agent + +proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = + ## When availabilities are modified or added, the queue should be unpaused if + ## it was paused and any slots in the queue should have their `seen` flag + ## cleared. + let queue = sales.context.slotQueue + + queue.clearSeenFlags() + if queue.paused: + trace "unpausing queue after new availability added" + queue.unpause() + +proc onStorageRequested(sales: Sales, + requestId: RequestId, + ask: StorageAsk, + expiry: UInt256) = + + logScope: + topics = "marketplace sales onStorageRequested" + requestId + slots = ask.slots + expiry + + let slotQueue = sales.context.slotQueue + + trace "storage requested, adding slots to queue" + + without items =? 
SlotQueueItem.init(requestId, ask, expiry).catch, err: + if err of SlotsOutOfRangeError: + warn "Too many slots, cannot add to queue" + else: + warn "Failed to create slot queue items from request", error = err.msg + return + + for item in items: + # continue on failure + if err =? slotQueue.push(item).errorOption: + if err of SlotQueueItemExistsError: + error "Failed to push item to queue becaue it already exists" + elif err of QueueNotRunningError: + warn "Failed to push item to queue becaue queue is not running" + else: + warn "Error adding request to SlotQueue", error = err.msg + +proc onSlotFreed(sales: Sales, + requestId: RequestId, + slotIndex: UInt256) = + + logScope: + topics = "marketplace sales onSlotFreed" + requestId + slotIndex + + trace "slot freed, adding to queue" + + proc addSlotToQueue() {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + # first attempt to populate request using existing slot metadata in queue + without var found =? queue.populateItem(requestId, + slotIndex.truncate(uint16)): + trace "no existing request metadata, getting request info from contract" + # if there's no existing slot for that request, retrieve the request + # from the contract. + without request =? await market.getRequest(requestId): + error "unknown request in contract" + return + + found = SlotQueueItem.init(request, slotIndex.truncate(uint16)) + + if err =? 
queue.push(found).errorOption: + raise err + + addSlotToQueue() + .track(sales) + .catch(proc(err: ref CatchableError) = + if err of SlotQueueItemExistsError: + error "Failed to push item to queue becaue it already exists" + elif err of QueueNotRunningError: + warn "Failed to push item to queue becaue queue is not running" + else: + warn "Error adding request to SlotQueue", error = err.msg + ) + +proc subscribeRequested(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + + proc onStorageRequested(requestId: RequestId, + ask: StorageAsk, + expiry: UInt256) = + sales.onStorageRequested(requestId, ask, expiry) try: - sales.subscription = some await sales.market.subscribeRequests(onRequest) + let sub = await market.subscribeRequests(onStorageRequested) + sales.subscriptions.add(sub) + except CancelledError as error: + raise error except CatchableError as e: - error "Unable to start sales", msg = e.msg + error "Unable to subscribe to storage request events", msg = e.msg + +proc subscribeCancellation(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onCancelled(requestId: RequestId) = + trace "request cancelled (via contract RequestCancelled event), removing all request slots from queue" + queue.delete(requestId) + + try: + let sub = await market.subscribeRequestCancelled(onCancelled) + sales.subscriptions.add(sub) + except CancelledError as error: + raise error + except CatchableError as e: + error "Unable to subscribe to cancellation events", msg = e.msg + +proc subscribeFulfilled*(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onFulfilled(requestId: RequestId) = + trace "request fulfilled, removing all request slots from queue" + queue.delete(requestId) + + for agent in sales.agents: + agent.onFulfilled(requestId) + + try: + let sub = await 
market.subscribeFulfillment(onFulfilled) + sales.subscriptions.add(sub) + except CancelledError as error: + raise error + except CatchableError as e: + error "Unable to subscribe to storage fulfilled events", msg = e.msg + +proc subscribeFailure(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onFailed(requestId: RequestId) = + trace "request failed, removing all request slots from queue" + queue.delete(requestId) + + for agent in sales.agents: + agent.onFailed(requestId) + + try: + let sub = await market.subscribeRequestFailed(onFailed) + sales.subscriptions.add(sub) + except CancelledError as error: + raise error + except CatchableError as e: + error "Unable to subscribe to storage failure events", msg = e.msg + +proc subscribeSlotFilled(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + trace "slot filled, removing from slot queue", requestId, slotIndex + queue.delete(requestId, slotIndex.truncate(uint16)) + + for agent in sales.agents: + agent.onSlotFilled(requestId, slotIndex) + + try: + let sub = await market.subscribeSlotFilled(onSlotFilled) + sales.subscriptions.add(sub) + except CancelledError as error: + raise error + except CatchableError as e: + error "Unable to subscribe to slot filled events", msg = e.msg + +proc subscribeSlotFreed(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + + proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) = + sales.onSlotFreed(requestId, slotIndex) + + try: + let sub = await market.subscribeSlotFreed(onSlotFreed) + sales.subscriptions.add(sub) + except CancelledError as error: + raise error + except CatchableError as e: + error "Unable to subscribe to slot freed events", msg = e.msg + +proc startSlotQueue(sales: Sales) {.async.} = + let slotQueue = 
sales.context.slotQueue + let reservations = sales.context.reservations + + slotQueue.onProcessSlot = + proc(item: SlotQueueItem, done: Future[void]) {.async.} = + trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex + sales.processSlot(item, done) + + asyncSpawn slotQueue.start() + + proc onAvailabilityAdded(availability: Availability) {.async.} = + await sales.onAvailabilityAdded(availability) + + reservations.onAvailabilityAdded = onAvailabilityAdded + +proc subscribe(sales: Sales) {.async.} = + await sales.subscribeRequested() + await sales.subscribeFulfilled() + await sales.subscribeFailure() + await sales.subscribeSlotFilled() + await sales.subscribeSlotFreed() + await sales.subscribeCancellation() + +proc unsubscribe(sales: Sales) {.async.} = + for sub in sales.subscriptions: + try: + await sub.unsubscribe() + except CancelledError as error: + raise error + except CatchableError as e: + error "Unable to unsubscribe from subscription", error = e.msg + +proc start*(sales: Sales) {.async.} = + await sales.load() + await sales.startSlotQueue() + await sales.subscribe() + sales.running = true proc stop*(sales: Sales) {.async.} = - if subscription =? 
sales.subscription: - sales.subscription = market.Subscription.none - try: - await subscription.unsubscribe() - except CatchableError as e: - warn "Unsubscribe failed", msg = e.msg + trace "stopping sales" + sales.running = false + await sales.context.slotQueue.stop() + await sales.unsubscribe() + await sales.trackedFutures.cancelTracked() + + for agent in sales.agents: + await agent.stop() + + sales.agents = @[] diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim new file mode 100644 index 00000000..0b5eaaf5 --- /dev/null +++ b/codex/sales/reservations.nim @@ -0,0 +1,669 @@ +## Nim-Codex +## Copyright (c) 2022 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. +## +## +--------------------------------------+ +## | RESERVATION | +## +----------------------------------------+ |--------------------------------------| +## | AVAILABILITY | | ReservationId | id | PK | +## |----------------------------------------| |--------------------------------------| +## | AvailabilityId | id | PK |<-||-------o<-| AvailabilityId | availabilityId | FK | +## |----------------------------------------| |--------------------------------------| +## | UInt256 | totalSize | | | UInt256 | size | | +## |----------------------------------------| |--------------------------------------| +## | UInt256 | freeSize | | | SlotId | slotId | | +## |----------------------------------------| +--------------------------------------+ +## | UInt256 | duration | | +## |----------------------------------------| +## | UInt256 | minPrice | | +## |----------------------------------------| +## | UInt256 | maxCollateral | | +## +----------------------------------------+ + +import pkg/upraises +push: {.upraises: [].} + +import std/sequtils +import 
std/sugar +import std/typetraits +import std/sequtils +import pkg/chronos +import pkg/datastore +import pkg/nimcrypto +import pkg/questionable +import pkg/questionable/results +import pkg/stint +import pkg/stew/byteutils +import ../codextypes +import ../logutils +import ../clock +import ../stores +import ../market +import ../contracts/requests +import ../utils/json +import ../units + +export requests +export logutils + +logScope: + topics = "sales reservations" + + +type + AvailabilityId* = distinct array[32, byte] + ReservationId* = distinct array[32, byte] + SomeStorableObject = Availability | Reservation + SomeStorableId = AvailabilityId | ReservationId + Availability* = ref object + id* {.serialize.}: AvailabilityId + totalSize* {.serialize.}: UInt256 + freeSize* {.serialize.}: UInt256 + duration* {.serialize.}: UInt256 + minPrice* {.serialize.}: UInt256 + maxCollateral* {.serialize.}: UInt256 + Reservation* = ref object + id* {.serialize.}: ReservationId + availabilityId* {.serialize.}: AvailabilityId + size* {.serialize.}: UInt256 + requestId* {.serialize.}: RequestId + slotIndex* {.serialize.}: UInt256 + Reservations* = ref object of RootObj + availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability + repo: RepoStore + onAvailabilityAdded: ?OnAvailabilityAdded + GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} + IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} + OnAvailabilityAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} + StorableIter* = ref object + finished*: bool + next*: GetNext + dispose*: IterDispose + ReservationsError* = object of CodexError + ReserveFailedError* = object of ReservationsError + ReleaseFailedError* = object of ReservationsError + DeleteFailedError* = object of ReservationsError + GetFailedError* = object of ReservationsError + NotExistsError* = object of ReservationsError + SerializationError* = 
object of ReservationsError + UpdateFailedError* = object of ReservationsError + BytesOutOfBoundsError* = object of ReservationsError + +const + SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module + ReservationsKey = (SalesKey / "reservations").tryGet + +proc hash*(x: AvailabilityId): Hash {.borrow.} +proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} + +template withLock(lock, body) = + try: + await lock.acquire() + body + finally: + if lock.locked: + lock.release() + + +proc new*(T: type Reservations, + repo: RepoStore): Reservations = + + T(availabilityLock: newAsyncLock(),repo: repo) + +proc init*( + _: type Availability, + totalSize: UInt256, + freeSize: UInt256, + duration: UInt256, + minPrice: UInt256, + maxCollateral: UInt256): Availability = + + var id: array[32, byte] + doAssert randomBytes(id) == 32 + Availability(id: AvailabilityId(id), totalSize:totalSize, freeSize: freeSize, duration: duration, minPrice: minPrice, maxCollateral: maxCollateral) + +proc init*( + _: type Reservation, + availabilityId: AvailabilityId, + size: UInt256, + requestId: RequestId, + slotIndex: UInt256 +): Reservation = + + var id: array[32, byte] + doAssert randomBytes(id) == 32 + Reservation(id: ReservationId(id), availabilityId: availabilityId, size: size, requestId: requestId, slotIndex: slotIndex) + +func toArray(id: SomeStorableId): array[32, byte] = + array[32, byte](id) + +proc `==`*(x, y: AvailabilityId): bool {.borrow.} +proc `==`*(x, y: ReservationId): bool {.borrow.} +proc `==`*(x, y: Reservation): bool = + x.id == y.id +proc `==`*(x, y: Availability): bool = + x.id == y.id + +proc `$`*(id: SomeStorableId): string = id.toArray.toHex + +proc toErr[E1: ref CatchableError, E2: ReservationsError]( + e1: E1, + _: type E2, + msg: string = e1.msg): ref E2 = + + return newException(E2, msg, e1) + +logutils.formatIt(LogFormat.textLines, SomeStorableId): it.short0xHexLog +logutils.formatIt(LogFormat.json, SomeStorableId): 
it.to0xHexLog + +proc `onAvailabilityAdded=`*(self: Reservations, + onAvailabilityAdded: OnAvailabilityAdded) = + self.onAvailabilityAdded = some onAvailabilityAdded + +func key*(id: AvailabilityId): ?!Key = + ## sales / reservations / + (ReservationsKey / $id) + +func key*(reservationId: ReservationId, availabilityId: AvailabilityId): ?!Key = + ## sales / reservations / / + (availabilityId.key / $reservationId) + +func key*(availability: Availability): ?!Key = + return availability.id.key + +func key*(reservation: Reservation): ?!Key = + return key(reservation.id, reservation.availabilityId) + +func available*(self: Reservations): uint = self.repo.available.uint + +func hasAvailable*(self: Reservations, bytes: uint): bool = + self.repo.available(bytes.NBytes) + +proc exists*( + self: Reservations, + key: Key): Future[bool] {.async.} = + + let exists = await self.repo.metaDs.ds.contains(key) + return exists + +proc getImpl( + self: Reservations, + key: Key): Future[?!seq[byte]] {.async.} = + + if not await self.exists(key): + let err = newException(NotExistsError, "object with key " & $key & " does not exist") + return failure(err) + + without serialized =? await self.repo.metaDs.ds.get(key), error: + return failure(error.toErr(GetFailedError)) + + return success serialized + +proc get*( + self: Reservations, + key: Key, + T: type SomeStorableObject): Future[?!T] {.async.} = + + without serialized =? await self.getImpl(key), error: + return failure(error) + + without obj =? T.fromJson(serialized), error: + return failure(error.toErr(SerializationError)) + + return success obj + +proc updateImpl( + self: Reservations, + obj: SomeStorableObject): Future[?!void] {.async.} = + + trace "updating " & $(obj.type), id = obj.id + + without key =? obj.key, error: + return failure(error) + + if err =? 
(await self.repo.metaDs.ds.put( + key, + @(obj.toJson.toBytes) + )).errorOption: + return failure(err.toErr(UpdateFailedError)) + + return success() + +proc updateAvailability( + self: Reservations, + obj: Availability): Future[?!void] {.async.} = + + logScope: + availabilityId = obj.id + + without key =? obj.key, error: + return failure(error) + + without oldAvailability =? await self.get(key, Availability), err: + if err of NotExistsError: + trace "Creating new Availability" + let res = await self.updateImpl(obj) + # inform subscribers that Availability has been added + if onAvailabilityAdded =? self.onAvailabilityAdded: + # when chronos v4 is implemented, and OnAvailabilityAdded is annotated + # with async:(raises:[]), we can remove this try/catch as we know, with + # certainty, that nothing will be raised + try: + await onAvailabilityAdded(obj) + except CancelledError as e: + raise e + except CatchableError as e: + # we don't have any insight into types of exceptions that + # `onAvailabilityAdded` can raise because it is caller-defined + warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + return res + else: + return failure(err) + + # Sizing of the availability changed, we need to adjust the repo reservation accordingly + if oldAvailability.totalSize != obj.totalSize: + trace "totalSize changed, updating repo reservation" + if oldAvailability.totalSize < obj.totalSize: # storage added + if reserveErr =? (await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes)).errorOption: + return failure(reserveErr.toErr(ReserveFailedError)) + + elif oldAvailability.totalSize > obj.totalSize: # storage removed + if reserveErr =? 
(await self.repo.release((oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes)).errorOption: + return failure(reserveErr.toErr(ReleaseFailedError)) + + let res = await self.updateImpl(obj) + + if oldAvailability.freeSize < obj.freeSize: # availability added + # inform subscribers that Availability has been modified (with increased + # size) + if onAvailabilityAdded =? self.onAvailabilityAdded: + # when chronos v4 is implemented, and OnAvailabilityAdded is annotated + # with async:(raises:[]), we can remove this try/catch as we know, with + # certainty, that nothing will be raised + try: + await onAvailabilityAdded(obj) + except CancelledError as e: + raise e + except CatchableError as e: + # we don't have any insight into types of exceptions that + # `onAvailabilityAdded` can raise because it is caller-defined + warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + + return res + +proc update*( + self: Reservations, + obj: Reservation): Future[?!void] {.async.} = + return await self.updateImpl(obj) + +proc update*( + self: Reservations, + obj: Availability): Future[?!void] {.async.} = + withLock(self.availabilityLock): + return await self.updateAvailability(obj) + +proc delete( + self: Reservations, + key: Key): Future[?!void] {.async.} = + + trace "deleting object", key + + if not await self.exists(key): + return success() + + if err =? (await self.repo.metaDs.ds.delete(key)).errorOption: + return failure(err.toErr(DeleteFailedError)) + + return success() + +proc deleteReservation*( + self: Reservations, + reservationId: ReservationId, + availabilityId: AvailabilityId): Future[?!void] {.async.} = + + logScope: + reservationId + availabilityId + + trace "deleting reservation" + without key =? key(reservationId, availabilityId), error: + return failure(error) + + withLock(self.availabilityLock): + without reservation =? 
(await self.get(key, Reservation)), error: + if error of NotExistsError: + return success() + else: + return failure(error) + + if reservation.size > 0.u256: + trace "returning remaining reservation bytes to availability", + size = reservation.size + + without availabilityKey =? availabilityId.key, error: + return failure(error) + + without var availability =? await self.get(availabilityKey, Availability), error: + return failure(error) + + availability.freeSize += reservation.size + + if updateErr =? (await self.updateAvailability(availability)).errorOption: + return failure(updateErr) + + if err =? (await self.repo.metaDs.ds.delete(key)).errorOption: + return failure(err.toErr(DeleteFailedError)) + + return success() + +# TODO: add support for deleting availabilities +# To delete, must not have any active sales. + +proc createAvailability*( + self: Reservations, + size: UInt256, + duration: UInt256, + minPrice: UInt256, + maxCollateral: UInt256): Future[?!Availability] {.async.} = + + trace "creating availability", size, duration, minPrice, maxCollateral + + let availability = Availability.init( + size, size, duration, minPrice, maxCollateral + ) + let bytes = availability.freeSize.truncate(uint) + + if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: + return failure(reserveErr.toErr(ReserveFailedError)) + + if updateErr =? (await self.update(availability)).errorOption: + + # rollback the reserve + trace "rolling back reserve" + if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption: + rollbackErr.parent = updateErr + return failure(rollbackErr) + + return failure(updateErr) + + return success(availability) + +method createReservation*( + self: Reservations, + availabilityId: AvailabilityId, + slotSize: UInt256, + requestId: RequestId, + slotIndex: UInt256 +): Future[?!Reservation] {.async, base.} = + + withLock(self.availabilityLock): + without availabilityKey =? 
availabilityId.key, error: + return failure(error) + + without availability =? await self.get(availabilityKey, Availability), error: + return failure(error) + + # Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availiability modifications + if availability.freeSize < slotSize: + let error = newException( + BytesOutOfBoundsError, + "trying to reserve an amount of bytes that is greater than the total size of the Availability") + return failure(error) + + trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex + + let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex) + + if createResErr =? (await self.update(reservation)).errorOption: + return failure(createResErr) + + # reduce availability freeSize by the slot size, which is now accounted for in + # the newly created Reservation + availability.freeSize -= slotSize + + # update availability with reduced size + trace "Updating availability with reduced size" + if updateErr =? (await self.updateAvailability(availability)).errorOption: + trace "Updating availability failed, rolling back reservation creation" + + without key =? reservation.key, keyError: + keyError.parent = updateErr + return failure(keyError) + + # rollback the reservation creation + if rollbackErr =? (await self.delete(key)).errorOption: + rollbackErr.parent = updateErr + return failure(rollbackErr) + + return failure(updateErr) + + trace "Reservation succesfully created" + return success(reservation) + +proc returnBytesToAvailability*( + self: Reservations, + availabilityId: AvailabilityId, + reservationId: ReservationId, + bytes: UInt256): Future[?!void] {.async.} = + + logScope: + reservationId + availabilityId + + withLock(self.availabilityLock): + without key =? key(reservationId, availabilityId), error: + return failure(error) + + without var reservation =? 
(await self.get(key, Reservation)), error: + return failure(error) + + # We are ignoring bytes that are still present in the Reservation because + # they will be returned to Availability through `deleteReservation`. + let bytesToBeReturned = bytes - reservation.size + + if bytesToBeReturned == 0: + trace "No bytes are returned", requestSizeBytes = bytes, returningBytes = bytesToBeReturned + return success() + + trace "Returning bytes", requestSizeBytes = bytes, returningBytes = bytesToBeReturned + + # First lets see if we can re-reserve the bytes, if the Repo's quota + # is depleted then we will fail-fast as there is nothing to be done atm. + if reserveErr =? (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + return failure(reserveErr.toErr(ReserveFailedError)) + + without availabilityKey =? availabilityId.key, error: + return failure(error) + + without var availability =? await self.get(availabilityKey, Availability), error: + return failure(error) + + availability.freeSize += bytesToBeReturned + + # Update availability with returned size + if updateErr =? (await self.updateAvailability(availability)).errorOption: + + trace "Rolling back returning bytes" + if rollbackErr =? (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + rollbackErr.parent = updateErr + return failure(rollbackErr) + + return failure(updateErr) + + return success() + +proc release*( + self: Reservations, + reservationId: ReservationId, + availabilityId: AvailabilityId, + bytes: uint): Future[?!void] {.async.} = + + logScope: + topics = "release" + bytes + reservationId + availabilityId + + trace "releasing bytes and updating reservation" + + without key =? key(reservationId, availabilityId), error: + return failure(error) + + without var reservation =? 
(await self.get(key, Reservation)), error: + return failure(error) + + if reservation.size < bytes.u256: + let error = newException( + BytesOutOfBoundsError, + "trying to release an amount of bytes that is greater than the total size of the Reservation") + return failure(error) + + if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption: + return failure(releaseErr.toErr(ReleaseFailedError)) + + reservation.size -= bytes.u256 + + # persist partially used Reservation with updated size + if err =? (await self.update(reservation)).errorOption: + + # rollback release if an update error encountered + trace "rolling back release" + if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: + rollbackErr.parent = err + return failure(rollbackErr) + return failure(err) + + return success() + +iterator items(self: StorableIter): Future[?seq[byte]] = + while not self.finished: + yield self.next() + +proc storables( + self: Reservations, + T: type SomeStorableObject, + queryKey: Key = ReservationsKey +): Future[?!StorableIter] {.async.} = + + var iter = StorableIter() + let query = Query.init(queryKey) + when T is Availability: + # should indicate key length of 4, but let the .key logic determine it + without defaultKey =? AvailabilityId.default.key, error: + return failure(error) + elif T is Reservation: + # should indicate key length of 5, but let the .key logic determine it + without defaultKey =? key(ReservationId.default, AvailabilityId.default), error: + return failure(error) + else: + raiseAssert "unknown type" + + without results =? await self.repo.metaDs.ds.query(query), error: + return failure(error) + + # /sales/reservations + proc next(): Future[?seq[byte]] {.async.} = + await idleAsync() + iter.finished = results.finished + if not results.finished and + res =? (await results.next()) and + res.data.len > 0 and + key =? 
res.key and + key.namespaces.len == defaultKey.namespaces.len: + + return some res.data + + return none seq[byte] + + proc dispose(): Future[?!void] {.async.} = + return await results.dispose() + + iter.next = next + iter.dispose = dispose + return success iter + +proc allImpl( + self: Reservations, + T: type SomeStorableObject, + queryKey: Key = ReservationsKey +): Future[?!seq[T]] {.async.} = + + var ret: seq[T] = @[] + + without storables =? (await self.storables(T, queryKey)), error: + return failure(error) + + for storable in storables.items: + without bytes =? (await storable): + continue + + without obj =? T.fromJson(bytes), error: + error "json deserialization error", + json = string.fromBytes(bytes), + error = error.msg + continue + + ret.add obj + + return success(ret) + +proc all*( + self: Reservations, + T: type SomeStorableObject +): Future[?!seq[T]] {.async.} = + return await self.allImpl(T) + +proc all*( + self: Reservations, + T: type SomeStorableObject, + availabilityId: AvailabilityId +): Future[?!seq[T]] {.async.} = + without key =? (ReservationsKey / $availabilityId): + return failure("no key") + + return await self.allImpl(T, key) + +proc findAvailability*( + self: Reservations, + size, duration, minPrice, collateral: UInt256 +): Future[?Availability] {.async.} = + + without storables =? (await self.storables(Availability)), e: + error "failed to get all storables", error = e.msg + return none Availability + + for item in storables.items: + if bytes =? (await item) and + availability =? 
Availability.fromJson(bytes): + + if size <= availability.freeSize and + duration <= availability.duration and + collateral <= availability.maxCollateral and + minPrice >= availability.minPrice: + + trace "availability matched", + id = availability.id, + size, availFreeSize = availability.freeSize, + duration, availDuration = availability.duration, + minPrice, availMinPrice = availability.minPrice, + collateral, availMaxCollateral = availability.maxCollateral + + # TODO: As soon as we're on ARC-ORC, we can use destructors + # to automatically dispose our iterators when they fall out of scope. + # For now: + if err =? (await storables.dispose()).errorOption: + error "failed to dispose storables iter", error = err.msg + return none Availability + return some availability + + trace "availability did not match", + id = availability.id, + size, availFreeSize = availability.freeSize, + duration, availDuration = availability.duration, + minPrice, availMinPrice = availability.minPrice, + collateral, availMaxCollateral = availability.maxCollateral diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim new file mode 100644 index 00000000..81de2d6f --- /dev/null +++ b/codex/sales/salesagent.nim @@ -0,0 +1,139 @@ +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/stint +import pkg/upraises +import ../contracts/requests +import ../errors +import ../logutils +import ./statemachine +import ./salescontext +import ./salesdata +import ./reservations + +export reservations + +logScope: + topics = "marketplace sales" + +type + SalesAgent* = ref object of Machine + context*: SalesContext + data*: SalesData + subscribed: bool + # Slot-level callbacks. 
+ onCleanUp*: OnCleanUp + onFilled*: ?OnFilled + + OnCleanUp* = proc (returnBytes = false, reprocessSlot = false): Future[void] {.gcsafe, upraises: [].} + OnFilled* = proc(request: StorageRequest, + slotIndex: UInt256) {.gcsafe, upraises: [].} + + SalesAgentError = object of CodexError + AllSlotsFilledError* = object of SalesAgentError + +func `==`*(a, b: SalesAgent): bool = + a.data.requestId == b.data.requestId and + a.data.slotIndex == b.data.slotIndex + +proc newSalesAgent*(context: SalesContext, + requestId: RequestId, + slotIndex: UInt256, + request: ?StorageRequest): SalesAgent = + var agent = SalesAgent.new() + agent.context = context + agent.data = SalesData( + requestId: requestId, + slotIndex: slotIndex, + request: request) + return agent + +proc retrieveRequest*(agent: SalesAgent) {.async.} = + let data = agent.data + let market = agent.context.market + if data.request.isNone: + data.request = await market.getRequest(data.requestId) + +proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} = + let data = agent.data + let market = agent.context.market + return await market.requestState(data.requestId) + +func state*(agent: SalesAgent): ?string = + proc description(state: State): string = + $state + agent.query(description) + +proc subscribeCancellation(agent: SalesAgent) {.async.} = + let data = agent.data + let clock = agent.context.clock + + proc onCancelled() {.async.} = + without request =? data.request: + return + + let market = agent.context.market + let expiry = await market.requestExpiresAt(data.requestId) + + while true: + let deadline = max(clock.now, expiry) + 1 + trace "Waiting for request to be cancelled", now=clock.now, expiry=deadline + await clock.waitUntil(deadline) + + without state =? 
await agent.retrieveRequestState(): + error "Uknown request", requestId = data.requestId + return + + case state + of New: + discard + of RequestState.Cancelled: + agent.schedule(cancelledEvent(request)) + break + of RequestState.Started, RequestState.Finished, RequestState.Failed: + break + + debug "The request is not yet canceled, even though it should be. Waiting for some more time.", currentState = state, now=clock.now + + data.cancelled = onCancelled() + +method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = + if agent.data.requestId == requestId and + not agent.data.cancelled.isNil: + agent.data.cancelled.cancel() + +method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = + without request =? agent.data.request: + return + if agent.data.requestId == requestId: + agent.schedule(failedEvent(request)) + +method onSlotFilled*(agent: SalesAgent, + requestId: RequestId, + slotIndex: UInt256) {.base, gcsafe, upraises: [].} = + + if agent.data.requestId == requestId and + agent.data.slotIndex == slotIndex: + agent.schedule(slotFilledEvent(requestId, slotIndex)) + +proc subscribe*(agent: SalesAgent) {.async.} = + if agent.subscribed: + return + + await agent.subscribeCancellation() + agent.subscribed = true + +proc unsubscribe*(agent: SalesAgent) {.async.} = + if not agent.subscribed: + return + + let data = agent.data + if not data.cancelled.isNil and not data.cancelled.finished: + await data.cancelled.cancelAndWait() + data.cancelled = nil + + agent.subscribed = false + +proc stop*(agent: SalesAgent) {.async.} = + await Machine(agent).stop() + await agent.unsubscribe() diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim new file mode 100644 index 00000000..199aa5fb --- /dev/null +++ b/codex/sales/salescontext.nim @@ -0,0 +1,35 @@ +import pkg/questionable +import pkg/questionable/results +import pkg/upraises + +import ../market +import ../clock +import ./slotqueue +import 
./reservations +import ../blocktype as bt + +type + SalesContext* = ref object + market*: Market + clock*: Clock + # Sales-level callbacks. Closure will be overwritten each time a slot is + # processed. + onStore*: ?OnStore + onClear*: ?OnClear + onSale*: ?OnSale + onProve*: ?OnProve + onExpiryUpdate*: ?OnExpiryUpdate + reservations*: Reservations + slotQueue*: SlotQueue + simulateProofFailures*: int + + BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} + OnStore* = proc(request: StorageRequest, + slot: UInt256, + blocksCb: BlocksCb): Future[?!void] {.gcsafe, upraises: [].} + OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.gcsafe, upraises: [].} + OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.gcsafe, upraises: [].} + OnClear* = proc(request: StorageRequest, + slotIndex: UInt256) {.gcsafe, upraises: [].} + OnSale* = proc(request: StorageRequest, + slotIndex: UInt256) {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim new file mode 100644 index 00000000..7fd56149 --- /dev/null +++ b/codex/sales/salesdata.nim @@ -0,0 +1,13 @@ +import pkg/chronos +import ../contracts/requests +import ../market +import ./reservations + +type + SalesData* = ref object + requestId*: RequestId + ask*: StorageAsk + request*: ?StorageRequest + slotIndex*: UInt256 + cancelled*: Future[void] + reservation*: ?Reservation diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim new file mode 100644 index 00000000..198ef80f --- /dev/null +++ b/codex/sales/slotqueue.nim @@ -0,0 +1,458 @@ +import std/sequtils +import std/tables +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/upraises +import ../errors +import ../logutils +import ../rng +import ../utils +import ../contracts/requests +import ../utils/asyncheapqueue +import ../utils/then +import ../utils/trackedfutures + +logScope: + topics = "marketplace 
slotqueue" + +type + OnProcessSlot* = + proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises:[].} + + # Non-ref obj copies value when assigned, preventing accidental modification + # of values which could cause an incorrect order (eg + # ``slotQueue[1].collateral = 1`` would cause ``collateral`` to be updated, + # but the heap invariant would no longer be honoured. When non-ref, the + # compiler can ensure that statement will fail). + SlotQueueWorker = object + doneProcessing*: Future[void] + + SlotQueueItem* = object + requestId: RequestId + slotIndex: uint16 + slotSize: UInt256 + duration: UInt256 + reward: UInt256 + collateral: UInt256 + expiry: UInt256 + seen: bool + + # don't need to -1 to prevent overflow when adding 1 (to always allow push) + # because AsyncHeapQueue size is of type `int`, which is larger than `uint16` + SlotQueueSize = range[1'u16..uint16.high] + + SlotQueue* = ref object + maxWorkers: int + onProcessSlot: ?OnProcessSlot + queue: AsyncHeapQueue[SlotQueueItem] + running: bool + workers: AsyncQueue[SlotQueueWorker] + trackedFutures: TrackedFutures + unpaused: AsyncEvent + + SlotQueueError = object of CodexError + SlotQueueItemExistsError* = object of SlotQueueError + SlotQueueItemNotExistsError* = object of SlotQueueError + SlotsOutOfRangeError* = object of SlotQueueError + QueueNotRunningError* = object of SlotQueueError + +# Number of concurrent workers used for processing SlotQueueItems +const DefaultMaxWorkers = 3 + +# Cap slot queue size to prevent unbounded growth and make sifting more +# efficient. Max size is not equivalent to the number of slots a host can +# service, which is limited by host availabilities and new requests circulating +# the network. Additionally, each new request/slot in the network will be +# included in the queue if it is higher priority than any of the exisiting +# items. Older slots should be unfillable over time as other hosts fill the +# slots. 
+const DefaultMaxSize = 128'u16 + +proc profitability(item: SlotQueueItem): UInt256 = + StorageAsk(collateral: item.collateral, + duration: item.duration, + reward: item.reward, + slotSize: item.slotSize).pricePerSlot + +proc `<`*(a, b: SlotQueueItem): bool = + # for A to have a higher priority than B (in a min queue), A must be less than + # B. + var scoreA: uint8 = 0 + var scoreB: uint8 = 0 + + proc addIf(score: var uint8, condition: bool, addition: int) = + if condition: + score += 1'u8 shl addition + + scoreA.addIf(a.seen < b.seen, 4) + scoreB.addIf(a.seen > b.seen, 4) + + scoreA.addIf(a.profitability > b.profitability, 3) + scoreB.addIf(a.profitability < b.profitability, 3) + + scoreA.addIf(a.collateral < b.collateral, 2) + scoreB.addIf(a.collateral > b.collateral, 2) + + scoreA.addIf(a.expiry > b.expiry, 1) + scoreB.addIf(a.expiry < b.expiry, 1) + + scoreA.addIf(a.slotSize < b.slotSize, 0) + scoreB.addIf(a.slotSize > b.slotSize, 0) + + return scoreA > scoreB + +proc `==`*(a, b: SlotQueueItem): bool = + a.requestId == b.requestId and + a.slotIndex == b.slotIndex + +proc new*(_: type SlotQueue, + maxWorkers = DefaultMaxWorkers, + maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue = + + if maxWorkers <= 0: + raise newException(ValueError, "maxWorkers must be positive") + if maxWorkers.uint16 > maxSize: + raise newException(ValueError, "maxWorkers must be less than maxSize") + + SlotQueue( + maxWorkers: maxWorkers, + # Add 1 to always allow for an extra item to be pushed onto the queue + # temporarily. 
After push (and sort), the bottom-most item will be deleted + queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1), + running: false, + trackedFutures: TrackedFutures.new(), + unpaused: newAsyncEvent() + ) + # avoid instantiating `workers` in constructor to avoid side effects in + # `newAsyncQueue` procedure + +proc init(_: type SlotQueueWorker): SlotQueueWorker = + SlotQueueWorker( + doneProcessing: newFuture[void]("slotqueue.worker.processing") + ) + +proc init*(_: type SlotQueueItem, + requestId: RequestId, + slotIndex: uint16, + ask: StorageAsk, + expiry: UInt256, + seen = false): SlotQueueItem = + + SlotQueueItem( + requestId: requestId, + slotIndex: slotIndex, + slotSize: ask.slotSize, + duration: ask.duration, + reward: ask.reward, + collateral: ask.collateral, + expiry: expiry, + seen: seen + ) + +proc init*(_: type SlotQueueItem, + request: StorageRequest, + slotIndex: uint16): SlotQueueItem = + + SlotQueueItem.init(request.id, + slotIndex, + request.ask, + request.expiry) + +proc init*(_: type SlotQueueItem, + requestId: RequestId, + ask: StorageAsk, + expiry: UInt256): seq[SlotQueueItem] = + + if not ask.slots.inRange: + raise newException(SlotsOutOfRangeError, "Too many slots") + + var i = 0'u16 + proc initSlotQueueItem: SlotQueueItem = + let item = SlotQueueItem.init(requestId, i, ask, expiry) + inc i + return item + + var items = newSeqWith(ask.slots.int, initSlotQueueItem()) + Rng.instance.shuffle(items) + return items + +proc init*(_: type SlotQueueItem, + request: StorageRequest): seq[SlotQueueItem] = + + return SlotQueueItem.init(request.id, request.ask, request.expiry) + +proc inRange*(val: SomeUnsignedInt): bool = + val.uint16 in SlotQueueSize.low..SlotQueueSize.high + +proc requestId*(self: SlotQueueItem): RequestId = self.requestId +proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex +proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize +proc duration*(self: SlotQueueItem): UInt256 = self.duration +proc reward*(self: 
SlotQueueItem): UInt256 = self.reward +proc collateral*(self: SlotQueueItem): UInt256 = self.collateral +proc seen*(self: SlotQueueItem): bool = self.seen + +proc running*(self: SlotQueue): bool = self.running + +proc len*(self: SlotQueue): int = self.queue.len + +proc size*(self: SlotQueue): int = self.queue.size - 1 + +proc paused*(self: SlotQueue): bool = not self.unpaused.isSet + +proc `$`*(self: SlotQueue): string = $self.queue + +proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) = + self.onProcessSlot = some onProcessSlot + +proc activeWorkers*(self: SlotQueue): int = + if not self.running: return 0 + + # active = capacity - available + self.maxWorkers - self.workers.len + +proc contains*(self: SlotQueue, item: SlotQueueItem): bool = + self.queue.contains(item) + +proc pause*(self: SlotQueue) = + # set unpaused flag to false -- coroutines will block on unpaused.wait() + self.unpaused.clear() + +proc unpause*(self: SlotQueue) = + # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() + self.unpaused.fire() + +proc populateItem*(self: SlotQueue, + requestId: RequestId, + slotIndex: uint16): ?SlotQueueItem = + + trace "populate item, items in queue", len = self.queue.len + for item in self.queue.items: + trace "populate item search", itemRequestId = item.requestId, requestId + if item.requestId == requestId: + return some SlotQueueItem( + requestId: requestId, + slotIndex: slotIndex, + slotSize: item.slotSize, + duration: item.duration, + reward: item.reward, + collateral: item.collateral, + expiry: item.expiry + ) + return none SlotQueueItem + +proc push*(self: SlotQueue, item: SlotQueueItem): ?!void = + + logScope: + requestId = item.requestId + slotIndex = item.slotIndex + seen = item.seen + + trace "pushing item to queue" + + if not self.running: + let err = newException(QueueNotRunningError, "queue not running") + return failure(err) + + if self.contains(item): + let err = newException(SlotQueueItemExistsError, 
"item already exists") + return failure(err) + + if err =? self.queue.pushNoWait(item).mapFailure.errorOption: + return failure(err) + + if self.queue.full(): + # delete the last item + self.queue.del(self.queue.size - 1) + + doAssert self.queue.len <= self.queue.size - 1 + + # when slots are pushed to the queue, the queue should be unpaused if it was + # paused + if self.paused and not item.seen: + trace "unpausing queue after new slot pushed" + self.unpause() + + return success() + +proc push*(self: SlotQueue, items: seq[SlotQueueItem]): ?!void = + for item in items: + if err =? self.push(item).errorOption: + return failure(err) + + return success() + +proc findByRequest(self: SlotQueue, requestId: RequestId): seq[SlotQueueItem] = + var items: seq[SlotQueueItem] = @[] + for item in self.queue.items: + if item.requestId == requestId: + items.add item + return items + +proc delete*(self: SlotQueue, item: SlotQueueItem) = + logScope: + requestId = item.requestId + slotIndex = item.slotIndex + + trace "removing item from queue" + + if not self.running: + trace "cannot delete item from queue, queue not running" + return + + self.queue.delete(item) + +proc delete*(self: SlotQueue, requestId: RequestId, slotIndex: uint16) = + let item = SlotQueueItem(requestId: requestId, slotIndex: slotIndex) + self.delete(item) + +proc delete*(self: SlotQueue, requestId: RequestId) = + let items = self.findByRequest(requestId) + for item in items: + self.delete(item) + +proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem = + self.queue[i] + +proc addWorker(self: SlotQueue): ?!void = + if not self.running: + let err = newException(QueueNotRunningError, "queue must be running") + return failure(err) + + trace "adding new worker to worker queue" + + let worker = SlotQueueWorker.init() + try: + discard worker.doneProcessing.track(self) + self.workers.addLastNoWait(worker) + except AsyncQueueFullError: + return failure("failed to add worker, worker queue full") + + return success() + 
+proc dispatch(self: SlotQueue, + worker: SlotQueueWorker, + item: SlotQueueItem) {.async.} = + logScope: + requestId = item.requestId + slotIndex = item.slotIndex + + if not self.running: + warn "Could not dispatch worker because queue is not running" + return + + if onProcessSlot =? self.onProcessSlot: + try: + discard worker.doneProcessing.track(self) + await onProcessSlot(item, worker.doneProcessing) + await worker.doneProcessing + + if err =? self.addWorker().errorOption: + raise err # catch below + + except QueueNotRunningError as e: + info "could not re-add worker to worker queue, queue not running", + error = e.msg + except CancelledError: + # do not bubble exception up as it is called with `asyncSpawn` which would + # convert the exception into a `FutureDefect` + discard + except CatchableError as e: + # we don't have any insight into types of errors that `onProcessSlot` can + # throw because it is caller-defined + warn "Unknown error processing slot in worker", error = e.msg + +proc clearSeenFlags*(self: SlotQueue) = + # Enumerate all items in the queue, overwriting each item with `seen = false`. + # To avoid issues with new queue items being pushed to the queue while all + # items are being iterated (eg if a new storage request comes in and pushes + # new slots to the queue), this routine must remain synchronous. + + if self.queue.empty: + return + + for item in self.queue.mitems: + item.seen = false # does not maintain the heap invariant + + # force heap reshuffling to maintain the heap invariant + doAssert self.queue.update(self.queue[0]), "slot queue failed to reshuffle" + + trace "all 'seen' flags cleared" + +proc start*(self: SlotQueue) {.async.} = + if self.running: + return + + trace "starting slot queue" + + self.running = true + + # must be called in `start` to avoid sideeffects in `new` + self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers) + + # Add initial workers to the `AsyncHeapQueue`. 
Once a worker has completed its + # task, a new worker will be pushed to the queue + for i in 0.. 0: + info "Proving with failure rate", rate = context.simulateProofFailures + return some State(SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)) + + return some State(SaleProving()) + + else: + let error = newException(HostMismatchError, "Slot filled by other host") + return some State(SaleErrored(error: error)) diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim new file mode 100644 index 00000000..c96dd0b9 --- /dev/null +++ b/codex/sales/states/filling.nim @@ -0,0 +1,37 @@ +import ../../logutils +import ../../market +import ../statemachine +import ../salesagent +import ./errorhandling +import ./filled +import ./cancelled +import ./failed + +logScope: + topics = "marketplace sales filling" + +type + SaleFilling* = ref object of ErrorHandlingState + proof*: Groth16Proof + +method `$`*(state: SaleFilling): string = "SaleFilling" + +method onCancelled*(state: SaleFilling, request: StorageRequest): ?State = + return some State(SaleCancelled()) + +method onFailed*(state: SaleFilling, request: StorageRequest): ?State = + return some State(SaleFailed()) + +method onSlotFilled*(state: SaleFilling, requestId: RequestId, + slotIndex: UInt256): ?State = + return some State(SaleFilled()) + +method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = + let data = SalesAgent(machine).data + let market = SalesAgent(machine).context.market + without (collateral =? 
data.request.?ask.?collateral): + raiseAssert "Request not set" + + debug "Filling slot", requestId = data.requestId, slotIndex = data.slotIndex + await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) + debug "Waiting for slot filled event...", requestId = $data.requestId, slotIndex = $data.slotIndex diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim new file mode 100644 index 00000000..59e9244c --- /dev/null +++ b/codex/sales/states/finished.nim @@ -0,0 +1,34 @@ +import pkg/chronos + +import ../../logutils +import ../statemachine +import ../salesagent +import ./errorhandling +import ./cancelled +import ./failed + +logScope: + topics = "marketplace sales finished" + +type + SaleFinished* = ref object of ErrorHandlingState + +method `$`*(state: SaleFinished): string = "SaleFinished" + +method onCancelled*(state: SaleFinished, request: StorageRequest): ?State = + return some State(SaleCancelled()) + +method onFailed*(state: SaleFinished, request: StorageRequest): ?State = + return some State(SaleFailed()) + +method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = + let agent = SalesAgent(machine) + let data = agent.data + + without request =? data.request: + raiseAssert "no sale request" + + info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex + + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp() diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim new file mode 100644 index 00000000..7a70fb20 --- /dev/null +++ b/codex/sales/states/ignored.nim @@ -0,0 +1,23 @@ +import pkg/chronos + +import ../../logutils +import ../statemachine +import ../salesagent +import ./errorhandling + +logScope: + topics = "marketplace sales ignored" + +type + SaleIgnored* = ref object of ErrorHandlingState + +method `$`*(state: SaleIgnored): string = "SaleIgnored" + +method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} = + let agent = SalesAgent(machine) + + if onCleanUp =? agent.onCleanUp: + # Ignored slots mean there was no availability. In order to prevent small + # availabilities from draining the queue, mark this slot as seen and re-add + # back into the queue. + await onCleanUp(reprocessSlot = true) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim new file mode 100644 index 00000000..4a5b8515 --- /dev/null +++ b/codex/sales/states/initialproving.nim @@ -0,0 +1,63 @@ +import pkg/questionable/results +import ../../clock +import ../../logutils +import ../statemachine +import ../salesagent +import ./errorhandling +import ./filling +import ./cancelled +import ./errored +import ./failed + +logScope: + topics = "marketplace sales initial-proving" + +type + SaleInitialProving* = ref object of ErrorHandlingState + +method `$`*(state: SaleInitialProving): string = "SaleInitialProving" + +method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State = + return some State(SaleCancelled()) + +method onFailed*(state: SaleInitialProving, request: StorageRequest): ?State = + return some State(SaleFailed()) + +proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} = + trace "Waiting until next period" + let period = periodicity.periodOf(clock.now().u256) + let periodEnd = periodicity.periodEnd(period).truncate(int64) + await 
clock.waitUntil(periodEnd + 1) + +proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} = + let periodicity = await market.periodicity() + let downtime = await market.proofDowntime() + await clock.waitUntilNextPeriod(periodicity) + while (await market.getPointer(slotId)) > (256 - downtime): + await clock.waitUntilNextPeriod(periodicity) + +method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async.} = + let data = SalesAgent(machine).data + let context = SalesAgent(machine).context + let market = context.market + let clock = context.clock + + without request =? data.request: + raiseAssert "no sale request" + + without onProve =? context.onProve: + raiseAssert "onProve callback not set" + + debug "Waiting for a proof challenge that is valid for the entire period" + let slot = Slot(request: request, slotIndex: data.slotIndex) + await waitForStableChallenge(market, clock, slot.id) + + debug "Generating initial proof", requestId = data.requestId + let challenge = await context.market.getChallenge(slot.id) + without proof =? 
(await onProve(slot, challenge)), err: + error "Failed to generate initial proof", error = err.msg + return some State(SaleErrored(error: err)) + + debug "Finished proof calculation", requestId = data.requestId + + return some State(SaleFilling(proof: proof)) diff --git a/codex/sales/states/payout.nim b/codex/sales/states/payout.nim new file mode 100644 index 00000000..5c8c2859 --- /dev/null +++ b/codex/sales/states/payout.nim @@ -0,0 +1,35 @@ +import ../../logutils +import ../../market +import ../statemachine +import ../salesagent +import ./errorhandling +import ./cancelled +import ./failed +import ./finished + +logScope: + topics = "marketplace sales payout" + +type + SalePayout* = ref object of ErrorHandlingState + +method `$`*(state: SalePayout): string = "SalePayout" + +method onCancelled*(state: SalePayout, request: StorageRequest): ?State = + return some State(SaleCancelled()) + +method onFailed*(state: SalePayout, request: StorageRequest): ?State = + return some State(SaleFailed()) + +method run(state: SalePayout, machine: Machine): Future[?State] {.async.} = + let data = SalesAgent(machine).data + let market = SalesAgent(machine).context.market + + without request =? 
data.request: + raiseAssert "no sale request" + + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Collecting finished slot's reward", requestId = data.requestId, slotIndex = data.slotIndex + await market.freeSlot(slot.id) + + return some State(SaleFinished()) diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim new file mode 100644 index 00000000..e5a441d3 --- /dev/null +++ b/codex/sales/states/preparing.nim @@ -0,0 +1,98 @@ +import pkg/questionable +import pkg/questionable/results +import pkg/metrics + +import ../../logutils +import ../../market +import ../salesagent +import ../statemachine +import ./errorhandling +import ./cancelled +import ./failed +import ./filled +import ./ignored +import ./downloading +import ./errored + +declareCounter(codex_reservations_availability_mismatch, "codex reservations availability_mismatch") + +type + SalePreparing* = ref object of ErrorHandlingState + +logScope: + topics = "marketplace sales preparing" + +method `$`*(state: SalePreparing): string = "SalePreparing" + +method onCancelled*(state: SalePreparing, request: StorageRequest): ?State = + return some State(SaleCancelled()) + +method onFailed*(state: SalePreparing, request: StorageRequest): ?State = + return some State(SaleFailed()) + +method onSlotFilled*(state: SalePreparing, requestId: RequestId, + slotIndex: UInt256): ?State = + return some State(SaleFilled()) + +method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} = + let agent = SalesAgent(machine) + let data = agent.data + let context = agent.context + let market = context.market + let reservations = context.reservations + + await agent.retrieveRequest() + await agent.subscribe() + + without request =? 
data.request: + raiseAssert "no sale request" + + let slotId = slotId(data.requestId, data.slotIndex) + let state = await market.slotState(slotId) + if state != SlotState.Free: + return some State(SaleIgnored()) + + # TODO: Once implemented, check to ensure the host is allowed to fill the slot, + # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) + + logScope: + slotIndex = data.slotIndex + slotSize = request.ask.slotSize + duration = request.ask.duration + pricePerSlot = request.ask.pricePerSlot + + # availability was checked for this slot when it entered the queue, however + # check to the ensure that there is still availability as they may have + # changed since being added (other slots may have been processed in that time) + without availability =? await reservations.findAvailability( + request.ask.slotSize, + request.ask.duration, + request.ask.pricePerSlot, + request.ask.collateral): + debug "no availability found for request, ignoring" + + return some State(SaleIgnored()) + + info "availability found for request, creating reservation" + + without reservation =? await reservations.createReservation( + availability.id, + request.ask.slotSize, + request.id, + data.slotIndex + ), error: + trace "Creation of reservation failed" + # Race condition: + # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. + # Should createReservation fail because there's no space, we proceed to SaleIgnored. 
+ if error of BytesOutOfBoundsError: + # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it + codex_reservations_availability_mismatch.inc() + return some State(SaleIgnored()) + + return some State(SaleErrored(error: error)) + + trace "Reservation created succesfully" + + data.reservation = some reservation + return some State(SaleDownloading()) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim new file mode 100644 index 00000000..dd05ac7f --- /dev/null +++ b/codex/sales/states/proving.nim @@ -0,0 +1,149 @@ +import std/options +import pkg/questionable/results +import ../../clock +import ../../logutils +import ../../utils/exceptions +import ../statemachine +import ../salesagent +import ../salescontext +import ./errorhandling +import ./cancelled +import ./failed +import ./errored +import ./payout + +logScope: + topics = "marketplace sales proving" + +type + SlotNotFilledError* = object of CatchableError + SaleProving* = ref object of ErrorHandlingState + loop: Future[void] + +method prove*( + state: SaleProving, + slot: Slot, + challenge: ProofChallenge, + onProve: OnProve, + market: Market, + currentPeriod: Period +) {.base, async.} = + try: + without proof =? (await onProve(slot, challenge)), err: + error "Failed to generate proof", error = err.msg + # In this state, there's nothing we can do except try again next time. 
+ return + debug "Submitting proof", currentPeriod = currentPeriod, slotId = slot.id + await market.submitProof(slot.id, proof) + except CancelledError as error: + trace "Submitting proof cancelled" + raise error + except CatchableError as e: + error "Submitting proof failed", msg = e.msgDetail + +proc proveLoop( + state: SaleProving, + market: Market, + clock: Clock, + request: StorageRequest, + slotIndex: UInt256, + onProve: OnProve +) {.async.} = + + let slot = Slot(request: request, slotIndex: slotIndex) + let slotId = slot.id + + logScope: + period = currentPeriod + requestId = request.id + slotIndex + slotId = slot.id + + proc getCurrentPeriod(): Future[Period] {.async.} = + let periodicity = await market.periodicity() + return periodicity.periodOf(clock.now().u256) + + proc waitUntilPeriod(period: Period) {.async.} = + let periodicity = await market.periodicity() + # Ensure that we're past the period boundary by waiting an additional second + await clock.waitUntil(periodicity.periodStart(period).truncate(int64) + 1) + + while true: + let currentPeriod = await getCurrentPeriod() + let slotState = await market.slotState(slot.id) + + case slotState + of SlotState.Filled: + debug "Proving for new period", period = currentPeriod + if (await market.isProofRequired(slotId)) or (await market.willProofBeRequired(slotId)): + let challenge = await market.getChallenge(slotId) + debug "Proof is required", period = currentPeriod, challenge = challenge + await state.prove(slot, challenge, onProve, market, currentPeriod) + of SlotState.Cancelled: + debug "Slot reached cancelled state" + # do nothing, let onCancelled callback take care of it + of SlotState.Failed: + debug "Slot reached failed state" + # do nothing, let onFailed callback take care of it + of SlotState.Finished: + debug "Slot reached finished state", period = currentPeriod + return # exit the loop + else: + let message = "Slot is not in Filled state, but in state: " & $slotState + raise 
newException(SlotNotFilledError, message) + + debug "waiting until next period" + await waitUntilPeriod(currentPeriod + 1) + +method `$`*(state: SaleProving): string = "SaleProving" + +method onCancelled*(state: SaleProving, request: StorageRequest): ?State = + # state.loop cancellation happens automatically when run is cancelled due to + # state change + return some State(SaleCancelled()) + +method onFailed*(state: SaleProving, request: StorageRequest): ?State = + # state.loop cancellation happens automatically when run is cancelled due to + # state change + return some State(SaleFailed()) + +method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} = + let data = SalesAgent(machine).data + let context = SalesAgent(machine).context + + without request =? data.request: + raiseAssert "no sale request" + + without onProve =? context.onProve: + raiseAssert "onProve callback not set" + + without market =? context.market: + raiseAssert("market not set") + + without clock =? 
context.clock: + raiseAssert("clock not set") + + debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex + try: + let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) + state.loop = loop + await loop + except CancelledError: + discard + except CatchableError as e: + error "Proving failed", msg = e.msg + return some State(SaleErrored(error: e)) + finally: + # Cleanup of the proving loop + debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex + + if not state.loop.isNil: + if not state.loop.finished: + try: + await state.loop.cancelAndWait() + except CatchableError as e: + error "Error during cancellation of proving loop", msg = e.msg + + state.loop = nil + + return some State(SalePayout()) diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim new file mode 100644 index 00000000..e194eec2 --- /dev/null +++ b/codex/sales/states/provingsimulated.nim @@ -0,0 +1,44 @@ +import ../../conf +when codex_enable_proof_failures: + import std/strutils + import pkg/stint + import pkg/ethers + import pkg/ethers/testing + + import ../../contracts/requests + import ../../logutils + import ../../market + import ../../utils/exceptions + import ../salescontext + import ./proving + + logScope: + topics = "marketplace sales simulated-proving" + + type + SaleProvingSimulated* = ref object of SaleProving + failEveryNProofs*: int + proofCount: int + + proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = + error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail + + method prove*(state: SaleProvingSimulated, slot: Slot, challenge: ProofChallenge, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} = + trace "Processing proving in simulated mode" + state.proofCount += 1 + if state.failEveryNProofs > 0 and + state.proofCount mod state.failEveryNProofs == 0: + state.proofCount = 0 + + try: + warn "Submitting 
INVALID proof", period = currentPeriod, slotId = slot.id + await market.submitProof(slot.id, Groth16Proof.default) + except MarketError as e: + if not e.msg.contains("Invalid proof"): + onSubmitProofError(e, currentPeriod, slot.id) + except CancelledError as error: + raise error + except CatchableError as e: + onSubmitProofError(e, currentPeriod, slot.id) + else: + await procCall SaleProving(state).prove(slot, challenge, onProve, market, currentPeriod) diff --git a/codex/sales/states/unknown.nim b/codex/sales/states/unknown.nim new file mode 100644 index 00000000..db00f517 --- /dev/null +++ b/codex/sales/states/unknown.nim @@ -0,0 +1,52 @@ +import ../../logutils +import ../statemachine +import ../salesagent +import ./filled +import ./finished +import ./failed +import ./errored +import ./cancelled +import ./payout + +logScope: + topics = "marketplace sales unknown" + +type + SaleUnknown* = ref object of SaleState + SaleUnknownError* = object of CatchableError + UnexpectedSlotError* = object of SaleUnknownError + +method `$`*(state: SaleUnknown): string = "SaleUnknown" + +method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State = + return some State(SaleCancelled()) + +method onFailed*(state: SaleUnknown, request: StorageRequest): ?State = + return some State(SaleFailed()) + +method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = + let agent = SalesAgent(machine) + let data = agent.data + let market = agent.context.market + + await agent.retrieveRequest() + await agent.subscribe() + + let slotId = slotId(data.requestId, data.slotIndex) + let slotState = await market.slotState(slotId) + + case slotState + of SlotState.Free: + let error = newException(UnexpectedSlotError, + "slot state on chain should not be 'free'") + return some State(SaleErrored(error: error)) + of SlotState.Filled: + return some State(SaleFilled()) + of SlotState.Finished: + return some State(SalePayout()) + of SlotState.Paid: + return some 
State(SaleFinished()) + of SlotState.Failed: + return some State(SaleFailed()) + of SlotState.Cancelled: + return some State(SaleCancelled()) diff --git a/codex/slots.nim b/codex/slots.nim new file mode 100644 index 00000000..0fe9d59e --- /dev/null +++ b/codex/slots.nim @@ -0,0 +1,6 @@ +import ./slots/builder +import ./slots/sampler +import ./slots/proofs +import ./slots/types + +export builder, sampler, proofs, types diff --git a/codex/slots/builder.nim b/codex/slots/builder.nim new file mode 100644 index 00000000..9df03f16 --- /dev/null +++ b/codex/slots/builder.nim @@ -0,0 +1,9 @@ +import ./builder/builder +import ./converters + +import ../merkletree + +export builder, converters + +type + Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim new file mode 100644 index 00000000..f999a514 --- /dev/null +++ b/codex/slots/builder/builder.nim @@ -0,0 +1,395 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +{.push raises: [].} + +import std/math +import std/sequtils +import std/sugar + +import pkg/libp2p +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/constantine/math/io/io_fields + +import ../../logutils +import ../../utils +import ../../stores +import ../../manifest +import ../../merkletree +import ../../utils/digest +import ../../utils/asynciter +import ../../indexingstrategy + +import ../converters + +export converters, asynciter + +logScope: + topics = "codex slotsbuilder" + +type + SlotsBuilder*[T, H] = ref object of RootObj + store: BlockStore + manifest: Manifest # current manifest + strategy: IndexingStrategy # indexing strategy + cellSize: NBytes # cell size + numSlotBlocks: Natural # number of blocks per slot (should yield a power of two number of cells) + slotRoots: seq[H] # roots of the slots + emptyBlock: seq[byte] # empty block + verifiableTree: ?T # verification tree (dataset tree) + emptyDigestTree: T # empty digest tree for empty blocks + +func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} = + ## Returns true if the slots are verifiable. + ## + + self.manifest.verifiable + +func slotRoots*[T, H](self: SlotsBuilder[T, H]): seq[H] {.inline.} = + ## Returns the slot roots. + ## + + self.slotRoots + +func verifyTree*[T, H](self: SlotsBuilder[T, H]): ?T {.inline.} = + ## Returns the slots tree (verification tree). + ## + + self.verifiableTree + +func verifyRoot*[T, H](self: SlotsBuilder[T, H]): ?H {.inline.} = + ## Returns the slots root (verification root). + ## + + if tree =? self.verifyTree and root =? tree.root: + return some root + +func numSlots*[T, H](self: SlotsBuilder[T, H]): Natural = + ## Number of slots. + ## + + self.manifest.numSlots + +func numSlotBlocks*[T, H](self: SlotsBuilder[T, H]): Natural = + ## Number of blocks per slot. + ## + + self.numSlotBlocks + +func numBlocks*[T, H](self: SlotsBuilder[T, H]): Natural = + ## Number of blocks. 
+ ## + + self.numSlotBlocks * self.manifest.numSlots + +func slotBytes*[T, H](self: SlotsBuilder[T, H]): NBytes = + ## Number of bytes per slot. + ## + + (self.manifest.blockSize.int * self.numSlotBlocks).NBytes + +func numBlockCells*[T, H](self: SlotsBuilder[T, H]): Natural = + ## Number of cells per block. + ## + + (self.manifest.blockSize div self.cellSize).Natural + +func cellSize*[T, H](self: SlotsBuilder[T, H]): NBytes = + ## Cell size. + ## + + self.cellSize + +func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural = + ## Number of cells per slot. + ## + + self.numBlockCells * self.numSlotBlocks + +func slotIndiciesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] = + ## Returns the slot indices. + ## + + self.strategy.getIndicies(slot).catch + +func slotIndicies*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] = + ## Returns the slot indices. + ## + + if iter =? self.strategy.getIndicies(slot).catch: + return toSeq(iter) + +func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest = + ## Returns the manifest. + ## + + self.manifest + +proc buildBlockTree*[T, H]( + self: SlotsBuilder[T, H], + blkIdx: Natural, + slotPos: Natural): Future[?!(seq[byte], T)] {.async.} = + ## Build the block digest tree and return a tuple with the + ## block data and the tree. + ## + + logScope: + blkIdx = blkIdx + slotPos = slotPos + numSlotBlocks = self.manifest.numSlotBlocks + cellSize = self.cellSize + + trace "Building block tree" + + if slotPos > (self.manifest.numSlotBlocks - 1): + # pad blocks are 0 byte blocks + trace "Returning empty digest tree for pad block" + return success (self.emptyBlock, self.emptyDigestTree) + + without blk =? await self.store.getBlock(self.manifest.treeCid, blkIdx), err: + error "Failed to get block CID for tree at index", err = err.msg + return failure(err) + + if blk.isEmpty: + success (self.emptyBlock, self.emptyDigestTree) + else: + without tree =? 
+ T.digestTree(blk.data, self.cellSize.int), err: + error "Failed to create digest for block", err = err.msg + return failure(err) + + success (blk.data, tree) + +proc getCellHashes*[T, H]( + self: SlotsBuilder[T, H], + slotIndex: Natural): Future[?!seq[H]] {.async.} = + ## Collect all the cells from a block and return + ## their hashes. + ## + + let + treeCid = self.manifest.treeCid + blockCount = self.manifest.blocksCount + numberOfSlots = self.manifest.numSlots + + logScope: + treeCid = treeCid + origBlockCount = blockCount + numberOfSlots = numberOfSlots + slotIndex = slotIndex + + let hashes = collect(newSeq): + for i, blkIdx in self.strategy.getIndicies(slotIndex): + logScope: + blkIdx = blkIdx + pos = i + + trace "Getting block CID for tree at index" + without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and + digest =? tree.root, err: + error "Failed to get block CID for tree at index", err = err.msg + return failure(err) + + trace "Get block digest", digest = digest.toHex + digest + + success hashes + +proc buildSlotTree*[T, H]( + self: SlotsBuilder[T, H], + slotIndex: Natural): Future[?!T] {.async.} = + ## Build the slot tree from the block digest hashes + ## and return the tree. + + without cellHashes =? (await self.getCellHashes(slotIndex)), err: + error "Failed to select slot blocks", err = err.msg + return failure(err) + + T.init(cellHashes) + +proc buildSlot*[T, H]( + self: SlotsBuilder[T, H], + slotIndex: Natural): Future[?!H] {.async.} = + ## Build a slot tree and store the proofs in + ## the block store. + ## + + logScope: + cid = self.manifest.treeCid + slotIndex = slotIndex + + trace "Building slot tree" + + without tree =? (await self.buildSlotTree(slotIndex)) and + treeCid =? tree.root.?toSlotCid, err: + error "Failed to build slot tree", err = err.msg + return failure(err) + + trace "Storing slot tree", treeCid, slotIndex, leaves = tree.leavesCount + for i, leaf in tree.leaves: + without cellCid =? 
leaf.toCellCid, err: + error "Failed to get CID for slot cell", err = err.msg + return failure(err) + + without proof =? tree.getProof(i) and + encodableProof =? proof.toEncodableProof, err: + error "Failed to get proof for slot tree", err = err.msg + return failure(err) + + if err =? (await self.store.putCidAndProof( + treeCid, i, cellCid, encodableProof)).errorOption: + error "Failed to store slot tree", err = err.msg + return failure(err) + + tree.root() + +func buildVerifyTree*[T, H](self: SlotsBuilder[T, H], slotRoots: openArray[H]): ?!T = + T.init(@slotRoots) + +proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} = + ## Build all slot trees and store them in the block store. + ## + + logScope: + cid = self.manifest.treeCid + blockCount = self.manifest.blocksCount + + trace "Building slots" + + if self.slotRoots.len == 0: + self.slotRoots = collect(newSeq): + for i in 0.. 0: + numPadSlotBlocks + numSlotBlocks + else: + numSlotBlocks + + numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot + + emptyBlock = newSeq[byte](manifest.blockSize.int) + emptyDigestTree = ? T.digestTree(emptyBlock, cellSize.int) + + strategy = ? strategy.init( + 0, + numBlocksTotal - 1, + manifest.numSlots).catch + + logScope: + numSlotBlocks = numSlotBlocks + numBlockCells = numBlockCells + numSlotCells = numSlotCells + pow2SlotCells = pow2SlotCells + numPadSlotBlocks = numPadSlotBlocks + numBlocksTotal = numBlocksTotal + numSlotBlocksTotal = numSlotBlocksTotal + strategy = strategy.strategyType + + trace "Creating slots builder" + + var + self = SlotsBuilder[T, H]( + store: store, + manifest: manifest, + strategy: strategy, + cellSize: cellSize, + emptyBlock: emptyBlock, + numSlotBlocks: numSlotBlocksTotal, + emptyDigestTree: emptyDigestTree) + + if manifest.verifiable: + if manifest.slotRoots.len == 0 or + manifest.slotRoots.len != manifest.numSlots: + return failure "Manifest is verifiable but slot roots are missing or invalid." 
+ + let + slotRoots = manifest.slotRoots.mapIt( (? it.fromSlotCid() )) + tree = ? self.buildVerifyTree(slotRoots) + expectedRoot = ? manifest.verifyRoot.fromVerifyCid() + verifyRoot = ? tree.root + + if verifyRoot != expectedRoot: + return failure "Existing slots root doesn't match reconstructed root." + + self.slotRoots = slotRoots + self.verifiableTree = some tree + + success self diff --git a/codex/slots/converters.nim b/codex/slots/converters.nim new file mode 100644 index 00000000..f9716fa3 --- /dev/null +++ b/codex/slots/converters.nim @@ -0,0 +1,88 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import std/sequtils + +import pkg/libp2p +import pkg/stew/arrayops +import pkg/questionable +import pkg/questionable/results +import pkg/poseidon2 +import pkg/poseidon2/io + +import ../codextypes +import ../merkletree +import ../errors +import ../utils/digest + +func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid = + let + mhash = ? MultiHash.init(mcodec, hash.toBytes).mapFailure + treeCid = ? Cid.init(CIDv1, cidCodec, mhash).mapFailure + success treeCid + +proc toPoseidon2Hash(cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Poseidon2Hash = + if cid.cidver != CIDv1: + return failure("Unexpected CID version") + + if cid.mcodec != cidCodec: + return failure("Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec) + + let + mhash = ? cid.mhash.mapFailure + bytes: array[32, byte] = array[32, byte].initCopyFrom(mhash.digestBytes()) + hash = ? 
Poseidon2Hash.fromBytes(bytes).toFailure + + success hash + +func toCellCid*(hash: Poseidon2Hash): ?!Cid = + toCid(hash, Pos2Bn128MrklCodec, CodexSlotCellCodec) + +func fromCellCid*(cid: Cid): ?!Poseidon2Hash = + toPoseidon2Hash(cid, Pos2Bn128MrklCodec, CodexSlotCellCodec) + +func toSlotCid*(hash: Poseidon2Hash): ?!Cid = + toCid(hash, multiCodec("identity"), SlotRootCodec) + +func toSlotCids*(slotRoots: openArray[Poseidon2Hash]): ?!seq[Cid] = + success slotRoots.mapIt( ? it.toSlotCid ) + +func fromSlotCid*(cid: Cid): ?!Poseidon2Hash = + toPoseidon2Hash(cid, multiCodec("identity"), SlotRootCodec) + +func toVerifyCid*(hash: Poseidon2Hash): ?!Cid = + toCid(hash, multiCodec("identity"), SlotProvingRootCodec) + +func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash = + toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec) + +func toEncodableProof*( + proof: Poseidon2Proof): ?!CodexProof = + + let + encodableProof = CodexProof( + mcodec: multiCodec("identity"), + index: proof.index, + nleaves: proof.nleaves, + path: proof.path.mapIt( @( it.toBytes ) )) + + success encodableProof + +func toVerifiableProof*( + proof: CodexProof): ?!Poseidon2Proof = + + let + nodes = proof.path.mapIt( + ? 
Poseidon2Hash.fromBytes(it.toArray32).toFailure + ) + + Poseidon2Proof.init( + index = proof.index, + nleaves = proof.nleaves, + nodes = nodes) diff --git a/codex/slots/proofs.nim b/codex/slots/proofs.nim new file mode 100644 index 00000000..0b4ad667 --- /dev/null +++ b/codex/slots/proofs.nim @@ -0,0 +1,4 @@ +import ./proofs/backends +import ./proofs/prover + +export circomcompat, prover diff --git a/codex/slots/proofs/backends.nim b/codex/slots/proofs/backends.nim new file mode 100644 index 00000000..477ba140 --- /dev/null +++ b/codex/slots/proofs/backends.nim @@ -0,0 +1,3 @@ +import ./backends/circomcompat + +export circomcompat diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim new file mode 100644 index 00000000..8619457a --- /dev/null +++ b/codex/slots/proofs/backends/circomcompat.nim @@ -0,0 +1,255 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +{.push raises: [].} + +import std/sugar + +import pkg/chronos +import pkg/questionable/results +import pkg/circomcompat + +import ../../types +import ../../../stores +import ../../../contracts + +import ./converters + +export circomcompat, converters + +type + CircomCompat* = object + slotDepth : int # max depth of the slot tree + datasetDepth : int # max depth of dataset tree + blkDepth : int # depth of the block merkle tree (pow2 for now) + cellElms : int # number of field elements per cell + numSamples : int # number of samples per slot + r1csPath : string # path to the r1cs file + wasmPath : string # path to the wasm file + zkeyPath : string # path to the zkey file + backendCfg : ptr CircomBn254Cfg + vkp* : ptr CircomKey + + NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H] + +func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): + NormalizedProofInputs[H] = + ## Parameters in CIRCOM circuits are statically sized and must be properly + ## padded before they can be passed onto the circuit. This function takes + ## variable length parameters and performs that padding. + ## + ## The output from this function can be JSON-serialized and used as direct + ## inputs to the CIRCOM circuit for testing and debugging when one wishes + ## to bypass the Rust FFI. 
+ + let normSamples = collect: + for sample in input.samples: + var merklePaths = sample.merklePaths + merklePaths.setLen(self.slotDepth) + Sample[H]( + cellData: sample.cellData, + merklePaths: merklePaths + ) + + var normSlotProof = input.slotProof + normSlotProof.setLen(self.datasetDepth) + + NormalizedProofInputs[H] ProofInputs[H]( + entropy: input.entropy, + datasetRoot: input.datasetRoot, + slotIndex: input.slotIndex, + slotRoot: input.slotRoot, + nCellsPerSlot: input.nCellsPerSlot, + nSlotsPerDataSet: input.nSlotsPerDataSet, + slotProof: normSlotProof, + samples: normSamples + ) + +proc release*(self: CircomCompat) = + ## Release the ctx + ## + + if not isNil(self.backendCfg): + self.backendCfg.unsafeAddr.releaseCfg() + + if not isNil(self.vkp): + self.vkp.unsafeAddr.release_key() + +proc prove[H]( + self: CircomCompat, + input: NormalizedProofInputs[H]): ?!CircomProof = + + doAssert input.samples.len == self.numSamples, + "Number of samples does not match" + + doAssert input.slotProof.len <= self.datasetDepth, + "Slot proof is too deep - dataset has more slots than what we can handle?" 
+ + doAssert input.samples.allIt( + block: + (it.merklePaths.len <= self.slotDepth + self.blkDepth and + it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit" + + # TODO: All parameters should match circom's static parametter + var + ctx: ptr CircomCompatCtx + + defer: + if ctx != nil: + ctx.addr.releaseCircomCompat() + + if initCircomCompat( + self.backendCfg, + addr ctx) != ERR_OK or ctx == nil: + raiseAssert("failed to initialize CircomCompat ctx") + + var + entropy = input.entropy.toBytes + dataSetRoot = input.datasetRoot.toBytes + slotRoot = input.slotRoot.toBytes + + if ctx.pushInputU256Array( + "entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK: + return failure("Failed to push entropy") + + if ctx.pushInputU256Array( + "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK: + return failure("Failed to push data set root") + + if ctx.pushInputU256Array( + "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK: + return failure("Failed to push data set root") + + if ctx.pushInputU32( + "nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK: + return failure("Failed to push nCellsPerSlot") + + if ctx.pushInputU32( + "nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK: + return failure("Failed to push nSlotsPerDataSet") + + if ctx.pushInputU32( + "slotIndex".cstring, input.slotIndex.uint32) != ERR_OK: + return failure("Failed to push slotIndex") + + var + slotProof = input.slotProof.mapIt( it.toBytes ).concat + + doAssert(slotProof.len == self.datasetDepth) + # arrays are always flattened + if ctx.pushInputU256Array( + "slotProof".cstring, + slotProof[0].addr, + uint (slotProof[0].len * slotProof.len)) != ERR_OK: + return failure("Failed to push slot proof") + + for s in input.samples: + var + merklePaths = s.merklePaths.mapIt( it.toBytes ) + data = s.cellData.mapIt( @(it.toBytes) ).concat + + if ctx.pushInputU256Array( + "merklePaths".cstring, + 
merklePaths[0].addr, + uint (merklePaths[0].len * merklePaths.len)) != ERR_OK: + return failure("Failed to push merkle paths") + + if ctx.pushInputU256Array( + "cellData".cstring, + data[0].addr, + data.len.uint) != ERR_OK: + return failure("Failed to push cell data") + + var + proofPtr: ptr Proof = nil + + let proof = + try: + if ( + let res = self.backendCfg.proveCircuit(ctx, proofPtr.addr); + res != ERR_OK) or + proofPtr == nil: + return failure("Failed to prove - err code: " & $res) + + proofPtr[] + finally: + if proofPtr != nil: + proofPtr.addr.releaseProof() + + success proof + +proc prove*[H]( + self: CircomCompat, + input: ProofInputs[H]): ?!CircomProof = + + self.prove(self.normalizeInput(input)) + +proc verify*[H]( + self: CircomCompat, + proof: CircomProof, + inputs: ProofInputs[H]): ?!bool = + ## Verify a proof using a ctx + ## + + var + proofPtr = unsafeAddr proof + inputs = inputs.toCircomInputs() + + try: + let res = verifyCircuit(proofPtr, inputs.addr, self.vkp) + if res == ERR_OK: + success true + elif res == ERR_FAILED_TO_VERIFY_PROOF: + success false + else: + failure("Failed to verify proof - err code: " & $res) + finally: + inputs.releaseCircomInputs() + +proc init*( + _: type CircomCompat, + r1csPath : string, + wasmPath : string, + zkeyPath : string = "", + slotDepth = DefaultMaxSlotDepth, + datasetDepth = DefaultMaxDatasetDepth, + blkDepth = DefaultBlockDepth, + cellElms = DefaultCellElms, + numSamples = DefaultSamplesNum): CircomCompat = + ## Create a new ctx + ## + + var cfg: ptr CircomBn254Cfg + var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil + + if initCircomConfig( + r1csPath.cstring, + wasmPath.cstring, + zkey, cfg.addr) != ERR_OK or cfg == nil: + if cfg != nil: cfg.addr.releaseCfg() + raiseAssert("failed to initialize circom compat config") + + var + vkpPtr: ptr VerifyingKey = nil + + if cfg.getVerifyingKey(vkpPtr.addr) != ERR_OK or vkpPtr == nil: + if vkpPtr != nil: vkpPtr.addr.releaseKey() + raiseAssert("Failed to get 
verifying key") + + CircomCompat( + r1csPath : r1csPath, + wasmPath : wasmPath, + zkeyPath : zkeyPath, + slotDepth : slotDepth, + datasetDepth: datasetDepth, + blkDepth : blkDepth, + cellElms : cellElms, + numSamples : numSamples, + backendCfg : cfg, + vkp : vkpPtr) diff --git a/codex/slots/proofs/backends/converters.nim b/codex/slots/proofs/backends/converters.nim new file mode 100644 index 00000000..60c64f5c --- /dev/null +++ b/codex/slots/proofs/backends/converters.nim @@ -0,0 +1,70 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +{.push raises: [].} + +import pkg/circomcompat + +import ../../../contracts +import ../../types +import ../../../merkletree + +type + CircomG1* = G1 + CircomG2* = G2 + + CircomProof* = Proof + CircomKey* = VerifyingKey + CircomInputs* = Inputs + +proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = + var + slotIndex = inputs.slotIndex.toF.toBytes.toArray32 + datasetRoot = inputs.datasetRoot.toBytes.toArray32 + entropy = inputs.entropy.toBytes.toArray32 + + elms = [ + entropy, + datasetRoot, + slotIndex + ] + + let inputsPtr = allocShared0(32 * elms.len) + copyMem(inputsPtr, addr elms[0], elms.len * 32) + + CircomInputs( + elms: cast[ptr array[32, byte]](inputsPtr), + len: elms.len.uint) + +proc releaseCircomInputs*(inputs: var CircomInputs) = + if not inputs.elms.isNil: + deallocShared(inputs.elms) + inputs.elms = nil + +func toG1*(g: CircomG1): G1Point = + G1Point( + x: UInt256.fromBytesLE(g.x), + y: UInt256.fromBytesLE(g.y)) + +func toG2*(g: CircomG2): G2Point = + G2Point( + x: Fp2Element( + real: UInt256.fromBytesLE(g.x[0]), + imag: UInt256.fromBytesLE(g.x[1]) + ), + y: Fp2Element( + real: UInt256.fromBytesLE(g.y[0]), + imag: 
UInt256.fromBytesLE(g.y[1]) + )) + +func toGroth16Proof*(proof: CircomProof): Groth16Proof = + Groth16Proof( + a: proof.a.toG1, + b: proof.b.toG2, + c: proof.c.toG1) diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim new file mode 100644 index 00000000..9077c478 --- /dev/null +++ b/codex/slots/proofs/prover.nim @@ -0,0 +1,101 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. +## + +import pkg/chronos +import pkg/chronicles +import pkg/circomcompat +import pkg/poseidon2 +import pkg/questionable/results + +import pkg/libp2p/cid + +import ../../manifest +import ../../merkletree +import ../../stores +import ../../market +import ../../utils/poseidon2digest + +import ../builder +import ../sampler + +import ./backends +import ../types + +export backends + +logScope: + topics = "codex prover" + +type + AnyBackend* = CircomCompat + AnyProof* = CircomProof + + AnySampler* = Poseidon2Sampler + AnyBuilder* = Poseidon2Builder + + AnyProofInputs* = ProofInputs[Poseidon2Hash] + Prover* = ref object of RootObj + backend: AnyBackend + store: BlockStore + nSamples: int + +proc prove*( + self: Prover, + slotIdx: int, + manifest: Manifest, + challenge: ProofChallenge): Future[?!(AnyProofInputs, AnyProof)] {.async.} = + ## Prove a statement using backend. + ## Returns a future that resolves to a proof. + + logScope: + cid = manifest.treeCid + slot = slotIdx + challenge = challenge + + trace "Received proof challenge" + + without builder =? AnyBuilder.new(self.store, manifest), err: + error "Unable to create slots builder", err = err.msg + return failure(err) + + without sampler =? 
AnySampler.new(slotIdx, self.store, builder), err: + error "Unable to create data sampler", err = err.msg + return failure(err) + + without proofInput =? await sampler.getProofInput(challenge, self.nSamples), err: + error "Unable to get proof input for slot", err = err.msg + return failure(err) + + # prove slot + without proof =? self.backend.prove(proofInput), err: + error "Unable to prove slot", err = err.msg + return failure(err) + + success (proofInput, proof) + +proc verify*( + self: Prover, + proof: AnyProof, + inputs: AnyProofInputs): Future[?!bool] {.async.} = + ## Prove a statement using backend. + ## Returns a future that resolves to a proof. + + self.backend.verify(proof, inputs) + +proc new*( + _: type Prover, + store: BlockStore, + backend: AnyBackend, + nSamples: int): Prover = + + Prover( + backend: backend, + store: store, + nSamples: nSamples) diff --git a/codex/slots/sampler.nim b/codex/slots/sampler.nim new file mode 100644 index 00000000..10ea2656 --- /dev/null +++ b/codex/slots/sampler.nim @@ -0,0 +1,9 @@ +import ./sampler/sampler +import ./sampler/utils + +import ../merkletree + +export sampler, utils + +type + Poseidon2Sampler* = DataSampler[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/sampler/sampler.nim b/codex/slots/sampler/sampler.nim new file mode 100644 index 00000000..3270d55a --- /dev/null +++ b/codex/slots/sampler/sampler.nim @@ -0,0 +1,153 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +import std/sugar + +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/stew/arrayops + +import ../../logutils +import ../../market +import ../../blocktype as bt +import ../../merkletree +import ../../manifest +import ../../stores + +import ../converters +import ../builder +import ../types +import ./utils + +logScope: + topics = "codex datasampler" + +type + DataSampler*[T, H] = ref object of RootObj + index: Natural + blockStore: BlockStore + builder: SlotsBuilder[T, H] + +func getCell*[T, H]( + self: DataSampler[T, H], + blkBytes: seq[byte], + blkCellIdx: Natural): seq[H] = + + let + cellSize = self.builder.cellSize.uint64 + dataStart = cellSize * blkCellIdx.uint64 + dataEnd = dataStart + cellSize + + doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size" + + blkBytes[dataStart ..< dataEnd].elements(H).toSeq() + +proc getSample*[T, H]( + self: DataSampler[T, H], + cellIdx: int, + slotTreeCid: Cid, + slotRoot: H): Future[?!Sample[H]] {.async.} = + + let + cellsPerBlock = self.builder.numBlockCells + blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index + blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index + origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx] # convert to original dataset block index + + logScope: + cellIdx = cellIdx + blkSlotIdx = blkSlotIdx + blkCellIdx = blkCellIdx + origBlockIdx = origBlockIdx + + trace "Retrieving sample from block tree" + let + (_, proof) = (await self.blockStore.getCidAndProof( + slotTreeCid, blkSlotIdx.Natural)).valueOr: + return failure("Failed to get slot tree CID and proof") + + slotProof = proof.toVerifiableProof().valueOr: + return failure("Failed to get verifiable proof") + + (bytes, blkTree) = (await self.builder.buildBlockTree( + origBlockIdx, blkSlotIdx)).valueOr: + return failure("Failed to build block tree") + + cellData = self.getCell(bytes, blkCellIdx) + cellProof = blkTree.getProof(blkCellIdx).valueOr: + return 
failure("Failed to get proof from block tree") + + success Sample[H]( + cellData: cellData, + merklePaths: (cellProof.path & slotProof.path)) + +proc getProofInput*[T, H]( + self: DataSampler[T, H], + entropy: ProofChallenge, + nSamples: Natural): Future[?!ProofInputs[H]] {.async.} = + ## Generate proofs as input to the proving circuit. + ## + + let + entropy = H.fromBytes( + array[31, byte].initCopyFrom(entropy[0..30])) # truncate to 31 bytes, otherwise it _might_ be greater than mod + + verifyTree = self.builder.verifyTree.toFailure.valueOr: + return failure("Failed to get verify tree") + + slotProof = verifyTree.getProof(self.index).valueOr: + return failure("Failed to get slot proof") + + datasetRoot = verifyTree.root().valueOr: + return failure("Failed to get dataset root") + + slotTreeCid = self.builder.manifest.slotRoots[self.index] + slotRoot = self.builder.slotRoots[self.index] + cellIdxs = entropy.cellIndices( + slotRoot, + self.builder.numSlotCells, + nSamples) + + logScope: + cells = cellIdxs + + trace "Collecting input for proof" + let samples = collect(newSeq): + for cellIdx in cellIdxs: + (await self.getSample(cellIdx, slotTreeCid, slotRoot)).valueOr: + return failure("Failed to get sample") + + success ProofInputs[H]( + entropy: entropy, + datasetRoot: datasetRoot, + slotProof: slotProof.path, + nSlotsPerDataSet: self.builder.numSlots, + nCellsPerSlot: self.builder.numSlotCells, + slotRoot: slotRoot, + slotIndex: self.index, + samples: samples) + +proc new*[T, H]( + _: type DataSampler[T, H], + index: Natural, + blockStore: BlockStore, + builder: SlotsBuilder[T, H]): ?!DataSampler[T, H] = + + if index > builder.slotRoots.high: + error "Slot index is out of range" + return failure("Slot index is out of range") + + if not builder.verifiable: + return failure("Cannot instantiate DataSampler for non-verifiable builder") + + success DataSampler[T, H]( + index: index, + blockStore: blockStore, + builder: builder) diff --git a/codex/slots/sampler/utils.nim 
b/codex/slots/sampler/utils.nim new file mode 100644 index 00000000..998f2cdc --- /dev/null +++ b/codex/slots/sampler/utils.nim @@ -0,0 +1,75 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import std/bitops + +import pkg/questionable/results +import pkg/constantine/math/arithmetic + +import ../../merkletree + +func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 = + doAssert( k > 0 and k <= 64 ) + var r = 0'u64 + for i in 0.. 0 ) + var k = -1 + var y = x + while (y > 0): + k += 1 + y = y shr 1 + return k + +func ceilingLog2*(x : int) : int = + doAssert ( x > 0 ) + return (floorLog2(x - 1) + 1) + +func toBlkInSlot*(cell: Natural, numCells: Natural): Natural = + let log2 = ceilingLog2(numCells) + doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + + return cell div numCells + +func toCellInBlk*(cell: Natural, numCells: Natural): Natural = + let log2 = ceilingLog2(numCells) + doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + + return cell mod numCells + +func cellIndex*( + entropy: Poseidon2Hash, + slotRoot: Poseidon2Hash, + numCells: Natural, counter: Natural): Natural = + let log2 = ceilingLog2(numCells) + doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + + let hash = Sponge.digest( @[ entropy, slotRoot, counter.toF ], rate = 2 ) + return int( extractLowBits(hash, log2) ) + +func cellIndices*( + entropy: Poseidon2Hash, + slotRoot: Poseidon2Hash, + numCells: Natural, nSamples: Natural): seq[Natural] = + + var indices: seq[Natural] + for i in 1..nSamples: + indices.add(cellIndex(entropy, slotRoot, numCells, i)) + + indices diff --git a/codex/slots/types.nim 
b/codex/slots/types.nim new file mode 100644 index 00000000..8703086e --- /dev/null +++ b/codex/slots/types.nim @@ -0,0 +1,28 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +type + Sample*[H] = object + cellData*: seq[H] + merklePaths*: seq[H] + + PublicInputs*[H] = object + slotIndex*: int + datasetRoot*: H + entropy*: H + + ProofInputs*[H] = object + entropy*: H + datasetRoot*: H + slotIndex*: Natural + slotRoot*: H + nCellsPerSlot*: Natural + nSlotsPerDataSet*: Natural + slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root) + samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots) diff --git a/codex/storageproofs.nim b/codex/storageproofs.nim deleted file mode 100644 index ef0a8f00..00000000 --- a/codex/storageproofs.nim +++ /dev/null @@ -1,7 +0,0 @@ -import ./storageproofs/por -import ./storageproofs/timing -import ./storageproofs/stpstore -import ./storageproofs/stpnetwork -import ./storageproofs/stpproto - -export por, timing, stpstore, stpnetwork, stpproto diff --git a/codex/storageproofs/por.nim b/codex/storageproofs/por.nim deleted file mode 100644 index 79ada5dd..00000000 --- a/codex/storageproofs/por.nim +++ /dev/null @@ -1,4 +0,0 @@ -import ./por/serialization -import ./por/por - -export por, serialization diff --git a/codex/storageproofs/por/por.nim b/codex/storageproofs/por/por.nim deleted file mode 100644 index f2f465a3..00000000 --- a/codex/storageproofs/por/por.nim +++ /dev/null @@ -1,533 +0,0 @@ -## Nim-Codex -## Copyright (c) 2021 Status Research & Development GmbH -## Licensed under either of -## * Apache License, version 2.0, 
([LICENSE-APACHE](LICENSE-APACHE)) -## * MIT license ([LICENSE-MIT](LICENSE-MIT)) -## at your option. -## This file may not be copied, modified, or distributed except according to -## those terms. - -# Implementation of the BLS-based public PoS scheme from -# Shacham H., Waters B., "Compact Proofs of Retrievability" -# using pairing over BLS12-381 ECC -# -# Notation from the paper -# In Z: -# - n: number of blocks -# - s: number of sectors per block -# -# In Z_p: modulo curve order -# - m_{ij}: sectors of the file i:0..n-1 j:0..s-1 -# - α: PoS secret key -# - name: random string -# - μ_j: part of proof, j:0..s-1 -# -# In G_1: multiplicative cyclic group -# - H: {0,1}∗ →G_1 : hash function -# - u_1,…,u_s ←R G_1 : random coefficients -# - σ_i: authenticators -# - σ: part of proof -# -# In G_2: multiplicative cyclic group -# - g: generator of G_2 -# - v ← g^α: PoS public key -# -# In G_T: -# - used only to calculate the two pairings during validation -# -# Implementation: -# Our implementation uses additive cyclic groups instead of the multiplicative -# cyclic group in the paper, thus changing the name of the group operation as in -# blscurve and blst. Thus, point multiplication becomes point addition, and scalar -# exponentiation becomes scalar multiplicaiton. -# -# Number of operations: -# The following table summarizes the number of operations in different phases -# using the following notation: -# - f: file size expressed in units of 31 bytes -# - n: number of blocks -# - s: number of sectors per block -# - q: number of query items -# -# Since f = n * s and s is a parameter of the scheme, it is better to express -# the cost as a function of f and s. This only matters for Setup, all other -# phases are independent of the file size assuming a given q. 
-# -# | | Setup | Challenge | Proof | Verify | -# |----------------|-----------|---------------|-----------|-----------|-----------| -# | G1 random | s = s | q | | | -# | G1 scalar mult | n * (s+1) = f * (1 + 1/s) | | q | q + s | -# | G1 add | n * s = f | | q-1 | q-1 + s-1 | -# | Hash to G1 | n = f / s | | | q | -# | Z_p mult | = | | s * q | | -# | Z_p add | = | | s * (q-1) | | -# | pairing | = | | | 2 | -# -# -# Storage and communication cost: -# The storage overhead for a file of f_b bytes is given by the n authenticators -# calculated in the setup phase. -# f_b = f * 31 = n * s * 31 -# Each authenticator is a point on G_1, which occupies 48 bytes in compressed form. -# Thus, the overall sorage size in bytes is: -# f_pos = fb + n * 48 = fb * (1 + (48/31) * (1/s)) -# -# Communicaiton cost in the Setup phase is simply related to the storage cost. -# The size of the challenge is -# q * (8 + 48) bytes -# The size of the proof is instead -# s * 32 + 48 bytes -import std/endians - -import pkg/chronos -import pkg/blscurve -import pkg/blscurve/blst/blst_abi - -import ../../rng -import ../../streams - -# sector size in bytes. 
Must be smaller than the subgroup order r -# which is 255 bits long for BLS12-381 -const - BytesPerSector* = 31 - - # length in bytes of the unique (random) name - Namelen = 512 - -type - # a single sector - ZChar* = array[BytesPerSector, byte] - - # secret key combining the metadata signing key and the POR generation key - SecretKey* = object - signkey*: blscurve.SecretKey - key*: blst_scalar - - # public key combining the metadata signing key and the POR validation key - PublicKey* = object - signkey*: blscurve.PublicKey - key*: blst_p2 - - # POR metadata (called "file tag t_0" in the original paper) - TauZero* = object - name*: array[Namelen, byte] - n*: int64 - u*: seq[blst_p1] - - # signed POR metadata (called "signed file tag t" in the original paper) - Tau* = object - t*: TauZero - signature*: array[96, byte] - - Proof* = object - mu*: seq[blst_scalar] - sigma*: blst_p1 - - # PoR query element - QElement* = object - I*: int64 - V*: blst_scalar - - PoR* = object - ssk*: SecretKey - spk*: PublicKey - tau*: Tau - authenticators*: seq[blst_p1] - -proc fromBytesBE(a: array[32, byte]): blst_scalar = - ## Convert data to blst native form - ## - - blst_scalar_from_bendian(result, a) - doAssert(blst_scalar_fr_check(result).bool) - -proc fromBytesBE(a: openArray[byte]): blst_scalar = - ## Convert data to blst native form - ## - - var b: array[32, byte] - doAssert(a.len <= b.len) - - let d = b.len - a.len - for i in 0.. 
postion - ## - - var res: ZChar - stream.setPos(((blockid * spb + sectorid) * ZChar.len).int) - discard await stream.readOnce(addr res[0], ZChar.len) - return res - -proc rndScalar(): blst_scalar = - ## Generate random scalar within the subroup order r - ## - - var scal {.noInit.}: array[32, byte] - var scalar {.noInit.}: blst_scalar - - while true: - for val in scal.mitems: - val = byte Rng.instance.rand(0xFF) - - scalar.blst_scalar_from_bendian(scal) - if blst_scalar_fr_check(scalar).bool: - break - - return scalar - -proc rndP2(): (blst_p2, blst_scalar) = - ## Generate random point on G2 - ## - - var - x {.noInit.}: blst_p2 - x.blst_p2_from_affine(BLS12_381_G2) # init from generator - - let - scalar = rndScalar() - x.blst_p2_mult(x, scalar, 255) - - return (x, scalar) - -proc rndP1(): (blst_p1, blst_scalar) = - ## Generate random point on G1 - var - x {.noInit.}: blst_p1 - x.blst_p1_from_affine(BLS12_381_G1) # init from generator - - let - scalar = rndScalar() - x.blst_p1_mult(x, scalar, 255) - - return (x, scalar) - -template posKeygen(): (blst_p2, blst_scalar) = - ## Generate POS key pair - ## - - rndP2() - -proc keyGen*(): (PublicKey, SecretKey) = - ## Generate key pair for signing metadata and for POS tags - ## - - var - pk: PublicKey - sk: SecretKey - ikm: array[32, byte] - - for b in ikm.mitems: - b = byte Rng.instance.rand(0xFF) - - doAssert ikm.keyGen(pk.signkey, sk.signkey) - - (pk.key, sk.key) = posKeygen() - return (pk, sk) - -proc sectorsCount(stream: SeekableStream, s: int64): int64 = - ## Calculate number of blocks for a file - ## - - let - size = stream.size() - n = ((size - 1) div (s * sizeof(ZChar))) + 1 - # debugEcho "File size=", size, " bytes", - # ", blocks=", n, - # ", sectors/block=", $s, - # ", sectorsize=", $sizeof(ZChar), " bytes" - - return n - -proc hashToG1[T: byte|char](msg: openArray[T]): blst_p1 = - ## Hash to curve with Dagger specific domain separation - ## - - const dst = "DAGGER-PROOF-OF-CONCEPT" - result.blst_hash_to_g1(msg, 
dst, aug = "") - -proc hashNameI(name: array[Namelen, byte], i: int64): blst_p1 = - ## Calculate unique filename and block index based hash - ## - - # # naive implementation, hashing a long string representation - # # such as "[255, 242, 23]1" - # return hashToG1($name & $i) - - # more compact and faster implementation - var namei: array[sizeof(name) + sizeof(int64), byte] - namei[0..sizeof(name)-1] = name - bigEndian64(addr(namei[sizeof(name)]), unsafeAddr(i)) - return hashToG1(namei) - -proc generateAuthenticatorNaive( - stream: SeekableStream, - ssk: SecretKey, - i: int64, - s: int64, - t: TauZero): Future[blst_p1] {.async.} = - ## Naive implementation of authenticator as in the S&W paper. - ## With the paper's multiplicative notation: - ## \sigmai=\(H(file||i)\cdot\prod{j=0}^{s-1}{uj^{m[i][j]}})^{\alpha} - ## - - var sum: blst_p1 - for j in 0.. G_T - ## - - var - aa: blst_p1_affine - bb: blst_p2_affine - l: blst_fp12 - - blst_p1_to_affine(aa, a) - blst_p2_to_affine(bb, b) - - blst_miller_loop(l, bb, aa) - blst_final_exp(result, l) - -proc verifyPairingsNaive(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool = - let e1 = pairing(a1, a2) - let e2 = pairing(b1, b2) - return e1 == e2 - -proc verifyPairingsNeg(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool = - ## Faster pairing verification using 2 miller loops but ony one final exponentiation - ## based on https://github.com/benjaminion/c-kzg/blob/main/src/bls12_381.c - ## - - var - loop0, loop1, gt_point: blst_fp12 - aa1, bb1: blst_p1_affine - aa2, bb2: blst_p2_affine - - var a1neg = a1 - blst_p1_cneg(a1neg, 1) - - blst_p1_to_affine(aa1, a1neg) - blst_p1_to_affine(bb1, b1) - blst_p2_to_affine(aa2, a2) - blst_p2_to_affine(bb2, b2) - - blst_miller_loop(loop0, aa2, aa1) - blst_miller_loop(loop1, bb2, bb1) - - blst_fp12_mul(gt_point, loop0, loop1) - blst_final_exp(gt_point, gt_point) - - return blst_fp12_is_one(gt_point).bool - -proc verifyPairings(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: 
blst_p2) : bool = - ## Wrapper to select verify pairings implementation - ## - - verifyPairingsNaive(a1, a2, b1, b2) - #verifyPairingsNeg(a1, a2, b1, b2) - -proc verifyProof*( - self: PoR, - q: seq[QElement], - mus: seq[blst_scalar], - sigma: blst_p1): bool = - ## Verify a BLS proof given a query - ## - - # verify signature on Tau - var signature: blscurve.Signature - if not signature.fromBytes(self.tau.signature): - return false - - if not verify(self.spk.signkey, $self.tau.t, signature): - return false - - var first: blst_p1 - for qelem in q: - var prod: blst_p1 - prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.I), qelem.V, 255) - first.blst_p1_add_or_double(first, prod) - doAssert(blst_p1_on_curve(first).bool) - - let us = self.tau.t.u - var second: blst_p1 - for j in 0.. 0: - await self.tagsHandle(res.get) - except CatchableError as exc: - trace "Exception handling Storage Proofs message", exc = exc.msg - finally: - await conn.close() - - self.handler = handle - self.codec = Codec - -proc new*( - T: type StpNetwork, - switch: Switch, - discovery: Discovery): StpNetwork = - let - self = StpNetwork( - switch: switch, - discovery: discovery) - - self.init() - self diff --git a/codex/storageproofs/stpproto.nim b/codex/storageproofs/stpproto.nim deleted file mode 100644 index 364582be..00000000 --- a/codex/storageproofs/stpproto.nim +++ /dev/null @@ -1,3 +0,0 @@ -import ./stpproto/messages - -export messages diff --git a/codex/storageproofs/stpproto/messages.nim b/codex/storageproofs/stpproto/messages.nim deleted file mode 100644 index d5294bbb..00000000 --- a/codex/storageproofs/stpproto/messages.nim +++ /dev/null @@ -1,68 +0,0 @@ -## Nim-Codex -## Copyright (c) 2022 Status Research & Development GmbH -## Licensed under either of -## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -## * MIT license ([LICENSE-MIT](LICENSE-MIT)) -## at your option. -## This file may not be copied, modified, or distributed except according to -## those terms. 
- -import pkg/questionable/results -import pkg/libp2p/protobuf/minprotobuf - -import ../../errors - -type - Tag* = object - idx*: int64 - tag*: seq[byte] - - TagsMessage* = object - cid*: seq[byte] - tags*: seq[Tag] - -func write*(pb: var ProtoBuffer, field: int, value: Tag) = - var ipb = initProtoBuffer() - ipb.write(1, value.idx.uint64) - ipb.write(2, value.tag) - ipb.finish() - pb.write(field, ipb) - -func encode*(msg: TagsMessage): seq[byte] = - var ipb = initProtoBuffer() - ipb.write(1, msg.cid) - - for tag in msg.tags: - ipb.write(2, tag) - - ipb.finish() - ipb.buffer - -func decode*(_: type Tag, pb: ProtoBuffer): ProtoResult[Tag] = - var - value = Tag() - idx: uint64 - - discard ? pb.getField(1, idx) - value.idx = idx.int64 - - discard ? pb.getField(2, value.tag) - - ok(value) - -func decode*(_: type TagsMessage, msg: openArray[byte]): ProtoResult[TagsMessage] = - var - value = TagsMessage() - pb = initProtoBuffer(msg) - - discard ? pb.getField(1, value.cid) - - var - bytes: seq[seq[byte]] - - discard ? pb.getRepeatedField(2, bytes) - - for b in bytes: - value.tags.add(? Tag.decode(initProtoBuffer(b))) - - ok(value) diff --git a/codex/storageproofs/stpstore.nim b/codex/storageproofs/stpstore.nim deleted file mode 100644 index b4ab14bd..00000000 --- a/codex/storageproofs/stpstore.nim +++ /dev/null @@ -1,123 +0,0 @@ -## Nim-Dagger -## Copyright (c) 2022 Status Research & Development GmbH -## Licensed under either of -## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -## * MIT license ([LICENSE-MIT](LICENSE-MIT)) -## at your option. -## This file may not be copied, modified, or distributed except according to -## those terms. 
- -import std/os -import std/strformat - -import pkg/libp2p -import pkg/chronos -import pkg/chronicles -import pkg/stew/io2 -import pkg/questionable -import pkg/questionable/results - -import ../errors -import ../formats - -import ./stpproto -import ./por - -type - StpStore* = object - authDir*: string - postfixLen*: int - -template stpPath*(self: StpStore, cid: Cid): string = - self.authDir / ($cid)[^self.postfixLen..^1] / $cid - -proc retrieve*( - self: StpStore, - cid: Cid): Future[?!PorMessage] {.async.} = - ## Retrieve authenticators from data store - ## - - let path = self.stpPath(cid) / "por" - var data: seq[byte] - if ( - let res = io2.readFile(path, data); - res.isErr): - let error = io2.ioErrorMsg(res.error) - trace "Cannot retrieve storage proof data from fs", path , error - return failure("Cannot retrieve storage proof data from fs") - - return PorMessage.decode(data).mapFailure - -proc store*( - self: StpStore, - por: PorMessage, - cid: Cid): Future[?!void] {.async.} = - ## Persist storage proofs - ## - - let - dir = self.stpPath(cid) - - if io2.createPath(dir).isErr: - trace "Unable to create storage proofs prefix dir", dir - return failure(&"Unable to create storage proofs prefix dir ${dir}") - - let path = dir / "por" - if ( - let res = io2.writeFile(path, por.encode()); - res.isErr): - let error = io2.ioErrorMsg(res.error) - trace "Unable to store storage proofs", path, cid, error - return failure( - &"Unable to store storage proofs - path = ${path} cid = ${cid} error = ${error}") - - return success() - -proc retrieve*( - self: StpStore, - cid: Cid, - blocks: seq[int]): Future[?!seq[Tag]] {.async.} = - var tags: seq[Tag] - for b in blocks: - var tag = Tag(idx: b) - let path = self.stpPath(cid) / $b - if ( - let res = io2.readFile(path, tag.tag); - res.isErr): - let error = io2.ioErrorMsg(res.error) - trace "Cannot retrieve tags from fs", path , error - return failure("Cannot retrieve tags from fs") - tags.add(tag) - - return tags.success - -proc 
store*( - self: StpStore, - tags: seq[Tag], - cid: Cid): Future[?!void] {.async.} = - let - dir = self.stpPath(cid) - - if io2.createPath(dir).isErr: - trace "Unable to create storage proofs prefix dir", dir - return failure(&"Unable to create storage proofs prefix dir ${dir}") - - for t in tags: - let path = dir / $t.idx - if ( - let res = io2.writeFile(path, t.tag); - res.isErr): - let error = io2.ioErrorMsg(res.error) - trace "Unable to store tags", path, cid, error - return failure( - &"Unable to store tags - path = ${path} cid = ${cid} error = ${error}") - - return success() - -proc init*( - T: type StpStore, - authDir: string, - postfixLen: int = 2): StpStore = - T( - authDir: authDir, - postfixLen: postfixLen) diff --git a/codex/storageproofs/timing.nim b/codex/storageproofs/timing.nim deleted file mode 100644 index 163295e0..00000000 --- a/codex/storageproofs/timing.nim +++ /dev/null @@ -1,4 +0,0 @@ -import ./timing/periods -import ./timing/proofs - -export periods, proofs diff --git a/codex/storageproofs/timing/proofs.nim b/codex/storageproofs/timing/proofs.nim deleted file mode 100644 index 6995a346..00000000 --- a/codex/storageproofs/timing/proofs.nim +++ /dev/null @@ -1,44 +0,0 @@ -import pkg/chronos -import pkg/stint -import pkg/upraises -import ./periods -import ../../contracts/requests - -export chronos -export stint -export periods -export requests - -type - Proofs* = ref object of RootObj - Subscription* = ref object of RootObj - OnProofSubmitted* = proc(id: SlotId, proof: seq[byte]) {.gcsafe, upraises:[].} - -method periodicity*(proofs: Proofs): - Future[Periodicity] {.base, async.} = - raiseAssert("not implemented") - -method isProofRequired*(proofs: Proofs, - id: SlotId): Future[bool] {.base, async.} = - raiseAssert("not implemented") - -method willProofBeRequired*(proofs: Proofs, - id: SlotId): Future[bool] {.base, async.} = - raiseAssert("not implemented") - -method getProofEnd*(proofs: Proofs, - id: SlotId): Future[UInt256] {.base, async.} = 
- raiseAssert("not implemented") - -method submitProof*(proofs: Proofs, - id: SlotId, - proof: seq[byte]) {.base, async.} = - raiseAssert("not implemented") - -method subscribeProofSubmission*(proofs: Proofs, - callback: OnProofSubmitted): - Future[Subscription] {.base, async.} = - raiseAssert("not implemented") - -method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} = - raiseAssert("not implemented") diff --git a/codex/stores.nim b/codex/stores.nim index 5b18f1f2..11e7c8df 100644 --- a/codex/stores.nim +++ b/codex/stores.nim @@ -1,6 +1,16 @@ import ./stores/cachestore import ./stores/blockstore import ./stores/networkstore -import ./stores/fsstore +import ./stores/repostore +import ./stores/maintenance +import ./stores/keyutils +import ./stores/treehelper -export cachestore, blockstore, networkstore, fsstore +export + cachestore, + blockstore, + networkstore, + repostore, + keyutils, + treehelper, + maintenance diff --git a/codex/stores/blockstore.nim b/codex/stores/blockstore.nim index 54f796f1..4921bebb 100644 --- a/codex/stores/blockstore.nim +++ b/codex/stores/blockstore.nim @@ -13,24 +13,99 @@ push: {.upraises: [].} import pkg/chronos import pkg/libp2p +import pkg/questionable import pkg/questionable/results +import ../clock import ../blocktype +import ../merkletree +import ../utils -export blocktype, libp2p +export blocktype type - OnBlock* = proc(cid: Cid): Future[void] {.upraises: [], gcsafe.} + BlockNotFoundError* = object of CodexError + + BlockType* {.pure.} = enum + Manifest, Block, Both + BlockStore* = ref object of RootObj method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} = ## Get a block from the blockstore ## + raiseAssert("getBlock by cid not implemented!") + +method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base.} = + ## Get a block from the blockstore + ## + + raiseAssert("getBlock by treecid not implemented!") + +method getCid*(self: BlockStore, treeCid: Cid, index: 
Natural): Future[?!Cid] {.base.} = + ## Get a cid given a tree and index + ## + raiseAssert("getCid by treecid not implemented!") + +method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base.} = + ## Get a block from the blockstore + ## + + raiseAssert("getBlock by addr not implemented!") + +method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base.} = + ## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree + ## + + raiseAssert("getBlockAndProof not implemented!") + +method putBlock*( + self: BlockStore, + blk: Block, + ttl = Duration.none): Future[?!void] {.base.} = + ## Put a block to the blockstore + ## + + raiseAssert("putBlock not implemented!") + +method putCidAndProof*( + self: BlockStore, + treeCid: Cid, + index: Natural, + blockCid: Cid, + proof: CodexProof): Future[?!void] {.base.} = + ## Put a block proof to the blockstore + ## + + raiseAssert("putCidAndProof not implemented!") + +method getCidAndProof*( + self: BlockStore, + treeCid: Cid, + index: Natural): Future[?!(Cid, CodexProof)] {.base.} = + ## Get a block proof from the blockstore + ## + + raiseAssert("getCidAndProof not implemented!") + +method ensureExpiry*( + self: BlockStore, + cid: Cid, + expiry: SecondsSince1970): Future[?!void] {.base.} = + ## Ensure that block's assosicated expiry is at least given timestamp + ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact + ## + raiseAssert("Not implemented!") -method putBlock*(self: BlockStore, blk: Block): Future[?!void] {.base.} = - ## Put a block to the blockstore +method ensureExpiry*( + self: BlockStore, + treeCid: Cid, + index: Natural, + expiry: SecondsSince1970): Future[?!void] {.base.} = + ## Ensure that block's associated expiry is at least given timestamp + ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## 
raiseAssert("Not implemented!") @@ -39,26 +114,40 @@ method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base.} = ## Delete a block from the blockstore ## - raiseAssert("Not implemented!") + raiseAssert("delBlock not implemented!") + +method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base.} = + ## Delete a block from the blockstore + ## + + raiseAssert("delBlock not implemented!") method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base.} = ## Check if the block exists in the blockstore ## - raiseAssert("Not implemented!") + raiseAssert("hasBlock not implemented!") -method listBlocks*(self: BlockStore, onBlock: OnBlock): Future[?!void] {.base.} = +method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base.} = + ## Check if the block exists in the blockstore + ## + + raiseAssert("hasBlock not implemented!") + +method listBlocks*( + self: BlockStore, + blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base.} = ## Get the list of blocks in the BlockStore. This is an intensive operation ## - raiseAssert("Not implemented!") + raiseAssert("listBlocks not implemented!") -method close*(self: Blockstore): Future[void] {.base.} = +method close*(self: BlockStore): Future[void] {.base.} = ## Close the blockstore, cleaning up resources managed by it. ## For some implementations this may be a no-op ## - raiseAssert("Not implemented!") + raiseAssert("close not implemented!") proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} = ## Check if the block exists in the blockstore. @@ -66,3 +155,9 @@ proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} = ## return (await self.hasBlock(blk)) |? false + +proc contains*(self: BlockStore, address: BlockAddress): Future[bool] {.async.} = + return if address.leaf: + (await self.hasBlock(address.treeCid, address.index)) |? false + else: + (await self.hasBlock(address.cid)) |? 
false diff --git a/codex/stores/cachestore.nim b/codex/stores/cachestore.nim index aa8eeeb7..d6623373 100644 --- a/codex/stores/cachestore.nim +++ b/codex/stores/cachestore.nim @@ -7,14 +7,12 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import std/sequtils import pkg/upraises push: {.upraises: [].} import std/options -import pkg/chronicles import pkg/chronos import pkg/libp2p import pkg/lrucache @@ -22,8 +20,14 @@ import pkg/questionable import pkg/questionable/results import ./blockstore +import ../units import ../chunker import ../errors +import ../logutils +import ../manifest +import ../merkletree +import ../utils +import ../clock export blockstore @@ -32,16 +36,15 @@ logScope: type CacheStore* = ref object of BlockStore - currentSize*: Natural # in bytes - size*: Positive # in bytes + currentSize*: NBytes + size*: NBytes cache: LruCache[Cid, Block] + cidAndProofCache: LruCache[(Cid, Natural), (Cid, CodexProof)] InvalidBlockSize* = object of CodexError const - MiB* = 1024 * 1024 # bytes, 1 mebibyte = 1,048,576 bytes - DefaultCacheSizeMiB* = 5 - DefaultCacheSize* = DefaultCacheSizeMiB * MiB # bytes + DefaultCacheSize*: NBytes = 5.MiBs method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} = ## Get a block from the stores @@ -51,17 +54,52 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} = if cid.isEmpty: trace "Empty block, ignoring" - return success cid.emptyBlock + return cid.emptyBlock if cid notin self.cache: - return failure (ref BlockNotFoundError)(msg: "Block not in cache") + return failure (ref BlockNotFoundError)(msg: "Block not in cache " & $cid) try: return success self.cache[cid] + except CancelledError as error: + raise error except CatchableError as exc: trace "Error requesting block from cache", cid, error = exc.msg return failure exc +method getCidAndProof*( + self: CacheStore, + treeCid: Cid, + index: Natural): Future[?!(Cid, CodexProof)] {.async.} = + + 
if cidAndProof =? self.cidAndProofCache.getOption((treeCid, index)): + success(cidAndProof) + else: + failure(newException(BlockNotFoundError, "Block not in cache: " & $BlockAddress.init(treeCid, index))) + +method getBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} = + without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: + return failure(err) + + await self.getBlock(cidAndProof[0]) + +method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} = + without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: + return failure(err) + + let (cid, proof) = cidAndProof + + without blk =? await self.getBlock(cid), err: + return failure(err) + + success((blk, proof)) + +method getBlock*(self: CacheStore, address: BlockAddress): Future[?!Block] = + if address.leaf: + self.getBlock(address.treeCid, address.index) + else: + self.getBlock(address.cid) + method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} = ## Check if the block exists in the blockstore ## @@ -73,18 +111,60 @@ method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} = return (cid in self.cache).success -method listBlocks*(s: CacheStore, onBlock: OnBlock): Future[?!void] {.async.} = +method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} = + without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: + if err of BlockNotFoundError: + return success(false) + else: + return failure(err) + + await self.hasBlock(cidAndProof[0]) + +func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) = + return iterator(): Cid = + for cid in self.cache.keys: + yield cid + +method listBlocks*( + self: CacheStore, + blockType = BlockType.Manifest +): Future[?!AsyncIter[?Cid]] {.async.} = ## Get the list of blocks in the BlockStore. 
This is an intensive operation ## - for cid in toSeq(s.cache.keys): - await onBlock(cid) + let + cids = self.cids() - return success() + proc isFinished(): bool = + return finished(cids) + + proc genNext(): Future[Cid] {.async.} = + cids() + + let iter = await (AsyncIter[Cid].new(genNext, isFinished) + .filter( + proc (cid: Cid): Future[bool] {.async.} = + without isManifest =? cid.isManifest, err: + trace "Error checking if cid is a manifest", err = err.msg + return false + + case blockType: + of BlockType.Both: + return true + of BlockType.Manifest: + return isManifest + of BlockType.Block: + return not isManifest + )) + + return success(map[Cid, ?Cid](iter, + proc (cid: Cid): Future[?Cid] {.async.} = + some(cid) + )) func putBlockSync(self: CacheStore, blk: Block): bool = - let blkSize = blk.data.len # in bytes + let blkSize = blk.data.len.NBytes # in bytes if blkSize > self.size: trace "Block size is larger than cache size", blk = blkSize, cache = self.size @@ -93,7 +173,7 @@ func putBlockSync(self: CacheStore, blk: Block): bool = while self.currentSize + blkSize > self.size: try: let removed = self.cache.removeLru() - self.currentSize -= removed.data.len + self.currentSize -= removed.data.len.NBytes except EmptyLruCacheError as exc: # if the cache is empty, can't remove anything, so break and add item # to the cache @@ -104,7 +184,10 @@ func putBlockSync(self: CacheStore, blk: Block): bool = self.currentSize += blkSize return true -method putBlock*(self: CacheStore, blk: Block): Future[?!void] {.async.} = +method putBlock*( + self: CacheStore, + blk: Block, + ttl = Duration.none): Future[?!void] {.async.} = ## Put a block to the blockstore ## @@ -116,6 +199,37 @@ method putBlock*(self: CacheStore, blk: Block): Future[?!void] {.async.} = discard self.putBlockSync(blk) return success() +method putCidAndProof*( + self: CacheStore, + treeCid: Cid, + index: Natural, + blockCid: Cid, + proof: CodexProof +): Future[?!void] {.async.} = + 
self.cidAndProofCache[(treeCid, index)] = (blockCid, proof) + success() + +method ensureExpiry*( + self: CacheStore, + cid: Cid, + expiry: SecondsSince1970 +): Future[?!void] {.async.} = + ## Updates block's assosicated TTL in store - not applicable for CacheStore + ## + + discard # CacheStore does not have notion of TTL + +method ensureExpiry*( + self: CacheStore, + treeCid: Cid, + index: Natural, + expiry: SecondsSince1970 +): Future[?!void] {.async.} = + ## Updates block's associated TTL in store - not applicable for CacheStore + ## + + discard # CacheStore does not have notion of TTL + method delBlock*(self: CacheStore, cid: Cid): Future[?!void] {.async.} = ## Delete a block from the blockstore ## @@ -127,7 +241,15 @@ method delBlock*(self: CacheStore, cid: Cid): Future[?!void] {.async.} = let removed = self.cache.del(cid) if removed.isSome: - self.currentSize -= removed.get.data.len + self.currentSize -= removed.get.data.len.NBytes + + return success() + +method delBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} = + let maybeRemoved = self.cidAndProofCache.del((treeCid, index)) + + if removed =? 
maybeRemoved: + return await self.delBlock(removed[0]) return success() @@ -137,22 +259,28 @@ method close*(self: CacheStore): Future[void] {.async.} = discard -func new*( +proc new*( _: type CacheStore, blocks: openArray[Block] = [], - cacheSize: Positive = DefaultCacheSize, # in bytes - chunkSize: Positive = DefaultChunkSize # in bytes - ): CacheStore {.raises: [Defect, ValueError].} = + cacheSize: NBytes = DefaultCacheSize, + chunkSize: NBytes = DefaultChunkSize +): CacheStore {.raises: [Defect, ValueError].} = + ## Create a new CacheStore instance + ## + ## `cacheSize` and `chunkSize` are both in bytes + ## if cacheSize < chunkSize: raise newException(ValueError, "cacheSize cannot be less than chunkSize") - var currentSize = 0 let - size = cacheSize div chunkSize + currentSize = 0'nb + size = int(cacheSize div chunkSize) cache = newLruCache[Cid, Block](size) + cidAndProofCache = newLruCache[(Cid, Natural), (Cid, CodexProof)](size) store = CacheStore( cache: cache, + cidAndProofCache: cidAndProofCache, currentSize: currentSize, size: cacheSize) @@ -160,3 +288,11 @@ func new*( discard store.putBlockSync(blk) return store + +proc new*( + _: type CacheStore, + blocks: openArray[Block] = [], + cacheSize: int, + chunkSize: int +): CacheStore {.raises: [Defect, ValueError].} = + CacheStore.new(blocks, NBytes cacheSize, NBytes chunkSize) diff --git a/codex/stores/fsstore.nim b/codex/stores/fsstore.nim deleted file mode 100644 index 10af63bd..00000000 --- a/codex/stores/fsstore.nim +++ /dev/null @@ -1,207 +0,0 @@ -## Nim-Codex -## Copyright (c) 2021 Status Research & Development GmbH -## Licensed under either of -## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -## * MIT license ([LICENSE-MIT](LICENSE-MIT)) -## at your option. -## This file may not be copied, modified, or distributed except according to -## those terms. 
- -import pkg/upraises - -push: {.upraises: [].} - -import std/os - -import pkg/chronos -import pkg/chronicles -import pkg/libp2p -import pkg/questionable -import pkg/questionable/results -import pkg/stew/io2 - -import ./cachestore -import ./blockstore - -export blockstore - -logScope: - topics = "codex fsstore" - -type - FSStore* = ref object of BlockStore - cache: BlockStore - repoDir: string - postfixLen*: int - -template blockPath*(self: FSStore, cid: Cid): string = - self.repoDir / ($cid)[^self.postfixLen..^1] / $cid - -method getBlock*(self: FSStore, cid: Cid): Future[?!Block] {.async.} = - ## Get a block from the cache or filestore. - ## Save a copy to the cache if present in the filestore but not in the cache - ## - - if not self.cache.isNil: - trace "Getting block from cache or filestore", cid - else: - trace "Getting block from filestore", cid - - if cid.isEmpty: - trace "Empty block, ignoring" - return success cid.emptyBlock - - if not self.cache.isNil: - let - cachedBlockRes = await self.cache.getBlock(cid) - - if not cachedBlockRes.isErr: - return success cachedBlockRes.get - else: - trace "Unable to read block from cache", cid, error = cachedBlockRes.error.msg - - # Read file contents - var - data: seq[byte] - - let - path = self.blockPath(cid) - res = io2.readFile(path, data) - - if res.isErr: - if not isFile(path): # May be, check instead that "res.error == ERROR_FILE_NOT_FOUND" ? - return failure (ref BlockNotFoundError)(msg: "Block not in filestore") - else: - let - error = io2.ioErrorMsg(res.error) - - trace "Error requesting block from filestore", path, error - return failure "Error requesting block from filestore: " & error - - without blk =? 
Block.new(cid, data), error: - trace "Unable to construct block from data", cid, error = error.msg - return failure error - - if not self.cache.isNil: - let - putCachedRes = await self.cache.putBlock(blk) - - if putCachedRes.isErr: - trace "Unable to store block in cache", cid, error = putCachedRes.error.msg - - return success blk - -method putBlock*(self: FSStore, blk: Block): Future[?!void] {.async.} = - ## Write a block's contents to a file with name based on blk.cid. - ## Save a copy to the cache - ## - - if not self.cache.isNil: - trace "Putting block into filestore and cache", cid = blk.cid - else: - trace "Putting block into filestore", cid = blk.cid - - if blk.isEmpty: - trace "Empty block, ignoring" - return success() - - let path = self.blockPath(blk.cid) - if isFile(path): - return success() - - # If directory exists createPath wont fail - let dir = path.parentDir - if io2.createPath(dir).isErr: - trace "Unable to create block prefix dir", dir - return failure("Unable to create block prefix dir") - - let res = io2.writeFile(path, blk.data) - if res.isErr: - let error = io2.ioErrorMsg(res.error) - trace "Unable to store block", path, cid = blk.cid, error - return failure("Unable to store block") - - if not self.cache.isNil: - let - putCachedRes = await self.cache.putBlock(blk) - - if putCachedRes.isErr: - trace "Unable to store block in cache", cid = blk.cid, error = putCachedRes.error.msg - - return success() - -method delBlock*(self: FSStore, cid: Cid): Future[?!void] {.async.} = - ## Delete a block from the cache and filestore - ## - - if not self.cache.isNil: - trace "Deleting block from cache and filestore", cid - else: - trace "Deleting block from filestore", cid - - if cid.isEmpty: - trace "Empty block, ignoring" - return success() - - if not self.cache.isNil: - let - delCachedRes = await self.cache.delBlock(cid) - - if delCachedRes.isErr: - trace "Unable to delete block from cache", cid, error = delCachedRes.error.msg - - let - path = 
self.blockPath(cid) - res = io2.removeFile(path) - - if res.isErr: - let error = io2.ioErrorMsg(res.error) - trace "Unable to delete block", path, cid, error - return error.failure - - return success() - -method hasBlock*(self: FSStore, cid: Cid): Future[?!bool] {.async.} = - ## Check if a block exists in the filestore - ## - - trace "Checking filestore for block existence", cid - if cid.isEmpty: - trace "Empty block, ignoring" - return true.success - - return self.blockPath(cid).isFile().success - -method listBlocks*(self: FSStore, onBlock: OnBlock): Future[?!void] {.async.} = - ## Process list of all blocks in the filestore via callback. - ## This is an intensive operation - ## - - trace "Listing all blocks in filestore" - for (pkind, folderPath) in self.repoDir.walkDir(): - if pkind != pcDir: continue - if len(folderPath.basename) != self.postfixLen: continue - - for (fkind, filename) in folderPath.walkDir(relative = true): - if fkind != pcFile: continue - let cid = Cid.init(filename) - if cid.isOk: - await onBlock(cid.get()) - - return success() - -method close*(self: FSStore): Future[void] {.async.} = - ## Close the underlying cache - ## - - if not self.cache.isNil: await self.cache.close - -proc new*( - T: type FSStore, - repoDir: string, - postfixLen = 2, - cache: BlockStore = nil): T = - T( - postfixLen: postfixLen, - repoDir: repoDir, - cache: cache) diff --git a/codex/stores/keyutils.nim b/codex/stores/keyutils.nim new file mode 100644 index 00000000..1dbeccb4 --- /dev/null +++ b/codex/stores/keyutils.nim @@ -0,0 +1,49 @@ +## Nim-Codex +## Copyright (c) 2022 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +import pkg/upraises +push: {.upraises: [].} + +import std/sugar +import pkg/questionable/results +import pkg/datastore +import pkg/libp2p +import ../namespaces +import ../manifest + +const + CodexMetaKey* = Key.init(CodexMetaNamespace).tryGet + CodexRepoKey* = Key.init(CodexRepoNamespace).tryGet + CodexBlocksKey* = Key.init(CodexBlocksNamespace).tryGet + CodexTotalBlocksKey* = Key.init(CodexBlockTotalNamespace).tryGet + CodexManifestKey* = Key.init(CodexManifestNamespace).tryGet + BlocksTtlKey* = Key.init(CodexBlocksTtlNamespace).tryGet + BlockProofKey* = Key.init(CodexBlockProofNamespace).tryGet + QuotaKey* = Key.init(CodexQuotaNamespace).tryGet + QuotaUsedKey* = (QuotaKey / "used").tryGet + QuotaReservedKey* = (QuotaKey / "reserved").tryGet + +func makePrefixKey*(postFixLen: int, cid: Cid): ?!Key = + let + cidKey = ? Key.init(($cid)[^postFixLen..^1] & "/" & $cid) + + if ? cid.isManifest: + success CodexManifestKey / cidKey + else: + success CodexBlocksKey / cidKey + +proc createBlockExpirationMetadataKey*(cid: Cid): ?!Key = + BlocksTtlKey / $cid + +proc createBlockExpirationMetadataQueryKey*(): ?!Key = + let queryString = ? (BlocksTtlKey / "*") + Key.init(queryString) + +proc createBlockCidAndProofMetadataKey*(treeCid: Cid, index: Natural): ?!Key = + (BlockProofKey / $treeCid).flatMap((k: Key) => k / $index) diff --git a/codex/stores/localstore.nim b/codex/stores/localstore.nim deleted file mode 100644 index 900825cd..00000000 --- a/codex/stores/localstore.nim +++ /dev/null @@ -1,118 +0,0 @@ -## Nim-Codex -## Copyright (c) 2022 Status Research & Development GmbH -## Licensed under either of -## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -## * MIT license ([LICENSE-MIT](LICENSE-MIT)) -## at your option. -## This file may not be copied, modified, or distributed except according to -## those terms. 
- -import std/os - -import pkg/upraises - -push: {.upraises: [].} - -import pkg/chronos -import pkg/libp2p -import pkg/questionable -import pkg/questionable/results -import pkg/datastore - -import ./blockstore -import ../blocktype -import ../namespaces -import ../manifest - -export blocktype, libp2p - -const - CacheBytesKey* = CodexMetaNamespace / "bytes" / "cache" - CachePersistentKey* = CodexMetaNamespace / "bytes" / "persistent" - -type - LocalStore* = ref object of BlockStore - ds*: Datastore - blocksRepo*: BlockStore # TODO: Should be a Datastore - manifestRepo*: BlockStore # TODO: Should be a Datastore - cacheBytes*: uint - persistBytes*: uint - -method getBlock*(self: LocalStore, cid: Cid): Future[?!Block] = - ## Get a block from the blockstore - ## - - if cid.isManifest: - self.manifestRepo.getBlock(cid) - else: - self.blocksRepo.getBlock(cid) - -method putBlock*(self: LocalStore, blk: Block): Future[?!void] = - ## Put a block to the blockstore - ## - - if blk.cid.isManifest: - self.manifestRepo.putBlock(blk) - else: - self.blocksRepo.putBlock(blk) - -method delBlock*(self: LocalStore, cid: Cid): Future[?!void] = - ## Delete a block from the blockstore - ## - - if cid.isManifest: - self.manifestRepo.delBlock(cid) - else: - self.blocksRepo.delBlock(cid) - -method hasBlock*(self: LocalStore, cid: Cid): Future[?!bool] = - ## Check if the block exists in the blockstore - ## - - if cid.isManifest: - self.manifestRepo.hasBlock(cid) - else: - self.blocksRepo.hasBlock(cid) - -method listBlocks*( - self: LocalStore, - blkType: MultiCodec, - batch = 100, - onBlock: OnBlock): Future[?!void] = - ## Get the list of blocks in the LocalStore. - ## This is an intensive operation - ## - - if $blkType in ManifestContainers: - self.manifestRepo.listBlocks(blkType, batch, onBlock) - else: - self.blocksRepo.listBlocks(onBlock) - -method close*(self: LocalStore) {.async.} = - ## Close the blockstore, cleaning up resources managed by it. 
- ## For some implementations this may be a no-op - ## - - await self.manifestRepo.close() - await self.blocksRepo.close() - -proc contains*(self: LocalStore, blk: Cid): Future[bool] {.async.} = - ## Check if the block exists in the blockstore. - ## Return false if error encountered - ## - - return (await self.hasBlock(blk)) |? false - -func new*( - T: type LocalStore, - datastore: Datastore, - blocksRepo: BlockStore, - manifestRepo: BlockStore, - cacheBytes: uint, - persistBytes: uint): T = - T( - datastore: datastore, - blocksRepo: blocksRepo, - manifestRepo: manifestRepo, - cacheBytes: cacheBytes, - persistBytes: persistBytes) diff --git a/codex/stores/maintenance.nim b/codex/stores/maintenance.nim new file mode 100644 index 00000000..63c6ba40 --- /dev/null +++ b/codex/stores/maintenance.nim @@ -0,0 +1,101 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +## Store maintenance module +## Looks for and removes expired blocks from blockstores. 
+ +import pkg/chronos +import pkg/questionable +import pkg/questionable/results + +import ./repostore +import ../utils/timer +import ../utils/asynciter +import ../clock +import ../logutils +import ../systemclock + +const + DefaultBlockMaintenanceInterval* = 10.minutes + DefaultNumberOfBlocksToMaintainPerInterval* = 1000 + +type + BlockMaintainer* = ref object of RootObj + repoStore: RepoStore + interval: Duration + timer: Timer + clock: Clock + numberOfBlocksPerInterval: int + offset: int + +proc new*( + T: type BlockMaintainer, + repoStore: RepoStore, + interval: Duration, + numberOfBlocksPerInterval = 100, + timer = Timer.new(), + clock: Clock = SystemClock.new() +): BlockMaintainer = + ## Create new BlockMaintainer instance + ## + ## Call `start` to begin looking for for expired blocks + ## + BlockMaintainer( + repoStore: repoStore, + interval: interval, + numberOfBlocksPerInterval: numberOfBlocksPerInterval, + timer: timer, + clock: clock, + offset: 0) + +proc deleteExpiredBlock(self: BlockMaintainer, cid: Cid): Future[void] {.async.} = + if isErr (await self.repoStore.delBlock(cid)): + trace "Unable to delete block from repoStore" + +proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[void] {.async} = + if be.expiry < self.clock.now: + await self.deleteExpiredBlock(be.cid) + else: + inc self.offset + +proc runBlockCheck(self: BlockMaintainer): Future[void] {.async.} = + let expirations = await self.repoStore.getBlockExpirations( + maxNumber = self.numberOfBlocksPerInterval, + offset = self.offset + ) + + without iter =? 
expirations, err: + trace "Unable to obtain blockExpirations iterator from repoStore" + return + + var numberReceived = 0 + for beFut in iter: + let be = await beFut + inc numberReceived + await self.processBlockExpiration(be) + await sleepAsync(1.millis) # cooperative scheduling + + # If we received fewer blockExpirations from the iterator than we asked for, + # We're at the end of the dataset and should start from 0 next time. + if numberReceived < self.numberOfBlocksPerInterval: + self.offset = 0 + +proc start*(self: BlockMaintainer) = + proc onTimer(): Future[void] {.async.} = + try: + await self.runBlockCheck() + except CancelledError as error: + raise error + except CatchableError as exc: + error "Unexpected exception in BlockMaintainer.onTimer(): ", msg=exc.msg + + self.timer.start(onTimer, self.interval) + +proc stop*(self: BlockMaintainer): Future[void] {.async.} = + await self.timer.stop() diff --git a/codex/stores/networkstore.nim b/codex/stores/networkstore.nim index 5c96a5f6..40758b94 100644 --- a/codex/stores/networkstore.nim +++ b/codex/stores/networkstore.nim @@ -7,19 +7,21 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
-import pkg/upraises -push: {.upraises: [].} +{.push raises: [].} -import pkg/chronicles import pkg/chronos import pkg/libp2p +import pkg/questionable/results -import ../blocktype as bt -import ../utils/asyncheapqueue - -import ./blockstore +import ../clock +import ../blocktype import ../blockexchange +import ../logutils +import ../merkletree +import ../utils/asyncheapqueue +import ../utils/asynciter +import ./blockstore export blockstore, blockexchange, asyncheapqueue @@ -31,34 +33,104 @@ type engine*: BlockExcEngine # blockexc decision engine localStore*: BlockStore # local block store -method getBlock*(self: NetworkStore, cid: Cid): Future[?!bt.Block] {.async.} = - ## Get a block from a remote peer - ## +method getBlock*(self: NetworkStore, address: BlockAddress): Future[?!Block] {.async.} = + without blk =? (await self.localStore.getBlock(address)), err: + if not (err of BlockNotFoundError): + error "Error getting block from local store", address, err = err.msg + return failure err - trace "Getting block from local store or network", cid + without newBlock =? (await self.engine.requestBlock(address)), err: + error "Unable to get block from exchange engine", address, err = err.msg + return failure err - without blk =? await self.localStore.getBlock(cid), error: - if not (error of BlockNotFoundError): return failure error - trace "Block not in local store", cid - # TODO: What if block isn't available in the engine too? 
- # TODO: add retrieved block to the local store - return (await self.engine.requestBlock(cid)).catch + return success newBlock return success blk -method putBlock*(self: NetworkStore, blk: bt.Block): Future[?!void] {.async.} = - ## Store block locally and notify the network +method getBlock*(self: NetworkStore, cid: Cid): Future[?!Block] = + ## Get a block from the blockstore ## - trace "Puting block into network store", cid = blk.cid + self.getBlock(BlockAddress.init(cid)) - let res = await self.localStore.putBlock(blk) +method getBlock*(self: NetworkStore, treeCid: Cid, index: Natural): Future[?!Block] = + ## Get a block from the blockstore + ## + + self.getBlock(BlockAddress.init(treeCid, index)) + +method putBlock*( + self: NetworkStore, + blk: Block, + ttl = Duration.none): Future[?!void] {.async.} = + ## Store block locally and notify the network + ## + let res = await self.localStore.putBlock(blk, ttl) if res.isErr: return res await self.engine.resolveBlocks(@[blk]) return success() +method putCidAndProof*( + self: NetworkStore, + treeCid: Cid, + index: Natural, + blockCid: Cid, + proof: CodexProof): Future[?!void] = + self.localStore.putCidAndProof(treeCid, index, blockCid, proof) + +method getCidAndProof*( + self: NetworkStore, + treeCid: Cid, + index: Natural): Future[?!(Cid, CodexProof)] = + ## Get a block proof from the blockstore + ## + + self.localStore.getCidAndProof(treeCid, index) + +method ensureExpiry*( + self: NetworkStore, + cid: Cid, + expiry: SecondsSince1970): Future[?!void] {.async.} = + ## Ensure that block's assosicated expiry is at least given timestamp + ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact + ## + + without blockCheck =? 
await self.localStore.hasBlock(cid), err: + return failure(err) + + if blockCheck: + return await self.localStore.ensureExpiry(cid, expiry) + else: + trace "Updating expiry - block not in local store", cid + + return success() + +method ensureExpiry*( + self: NetworkStore, + treeCid: Cid, + index: Natural, + expiry: SecondsSince1970): Future[?!void] {.async.} = + ## Ensure that block's associated expiry is at least given timestamp + ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact + ## + + without blockCheck =? await self.localStore.hasBlock(treeCid, index), err: + return failure(err) + + if blockCheck: + return await self.localStore.ensureExpiry(treeCid, index, expiry) + else: + trace "Updating expiry - block not in local store", treeCid, index + + return success() + +method listBlocks*( + self: NetworkStore, + blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] = + self.localStore.listBlocks(blockType) + method delBlock*(self: NetworkStore, cid: Cid): Future[?!void] = ## Delete a block from the blockstore ## @@ -79,15 +151,14 @@ method close*(self: NetworkStore): Future[void] {.async.} = ## Close the underlying local blockstore ## - if not self.localStore.isNil: await self.localStore.close + if not self.localStore.isNil: + await self.localStore.close proc new*( T: type NetworkStore, engine: BlockExcEngine, - localStore: BlockStore): T = - - let b = NetworkStore( - localStore: localStore, - engine: engine) - - return b + localStore: BlockStore +): NetworkStore = + ## Create new instance of a NetworkStore + ## + NetworkStore(localStore: localStore, engine: engine) diff --git a/codex/stores/queryiterhelper.nim b/codex/stores/queryiterhelper.nim new file mode 100644 index 00000000..7c51d215 --- /dev/null +++ b/codex/stores/queryiterhelper.nim @@ -0,0 +1,66 @@ +import pkg/questionable +import pkg/questionable/results +import pkg/chronos +import pkg/chronicles +import pkg/datastore/typedds + +import 
../utils/asynciter + +type KeyVal*[T] = tuple[key: Key, value: T] + +proc toAsyncIter*[T]( + queryIter: QueryIter[T], + finishOnErr: bool = true + ): Future[?!AsyncIter[?!QueryResponse[T]]] {.async.} = + ## Converts `QueryIter[T]` to `AsyncIter[?!QueryResponse[T]]` and automatically + ## runs dispose whenever `QueryIter` finishes or whenever an error occurs (only + ## if the flag finishOnErr is set to true) + ## + + if queryIter.finished: + trace "Disposing iterator" + if error =? (await queryIter.dispose()).errorOption: + return failure(error) + return success(AsyncIter[?!QueryResponse[T]].empty()) + + var errOccurred = false + + proc genNext: Future[?!QueryResponse[T]] {.async.} = + let queryResOrErr = await queryIter.next() + + if queryResOrErr.isErr: + errOccurred = true + + if queryIter.finished or (errOccurred and finishOnErr): + trace "Disposing iterator" + if error =? (await queryIter.dispose()).errorOption: + return failure(error) + + return queryResOrErr + + proc isFinished(): bool = + queryIter.finished or (errOccurred and finishOnErr) + + AsyncIter[?!QueryResponse[T]].new(genNext, isFinished).success + +proc filterSuccess*[T]( + iter: AsyncIter[?!QueryResponse[T]] + ): Future[AsyncIter[tuple[key: Key, value: T]]] {.async.} = + ## Filters out any items that are not success + + proc mapping(resOrErr: ?!QueryResponse[T]): Future[?KeyVal[T]] {.async.} = + without res =? resOrErr, error: + error "Error occurred when getting QueryResponse", msg = error.msg + return KeyVal[T].none + + without key =? res.key: + warn "No key for a QueryResponse" + return KeyVal[T].none + + without value =? 
res.value, error: + error "Error occurred when getting a value from QueryResponse", msg = error.msg + return KeyVal[T].none + + (key: key, value: value).some + + await mapFilter[?!QueryResponse[T], KeyVal[T]](iter, mapping) diff --git a/codex/stores/repostore.nim b/codex/stores/repostore.nim new file mode 100644 index 00000000..5937cbfc --- /dev/null +++ b/codex/stores/repostore.nim @@ -0,0 +1,5 @@ +import ./repostore/store +import ./repostore/types +import ./repostore/coders + +export store, types, coders diff --git a/codex/stores/repostore/coders.nim b/codex/stores/repostore/coders.nim new file mode 100644 index 00000000..6fc78408 --- /dev/null +++ b/codex/stores/repostore/coders.nim @@ -0,0 +1,47 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+## + +import std/sugar +import pkg/libp2p/cid +import pkg/serde/json +import pkg/stew/byteutils +import pkg/stew/endians2 + +import ./types +import ../../errors +import ../../merkletree +import ../../utils/json + +proc encode*(t: QuotaUsage): seq[byte] = t.toJson().toBytes() +proc decode*(T: type QuotaUsage, bytes: seq[byte]): ?!T = T.fromJson(bytes) + +proc encode*(t: BlockMetadata): seq[byte] = t.toJson().toBytes() +proc decode*(T: type BlockMetadata, bytes: seq[byte]): ?!T = T.fromJson(bytes) + +proc encode*(t: LeafMetadata): seq[byte] = t.toJson().toBytes() +proc decode*(T: type LeafMetadata, bytes: seq[byte]): ?!T = T.fromJson(bytes) + +proc encode*(t: DeleteResult): seq[byte] = t.toJson().toBytes() +proc decode*(T: type DeleteResult, bytes: seq[byte]): ?!T = T.fromJson(bytes) + +proc encode*(t: StoreResult): seq[byte] = t.toJson().toBytes() +proc decode*(T: type StoreResult, bytes: seq[byte]): ?!T = T.fromJson(bytes) + +proc encode*(i: uint64): seq[byte] = + @(i.toBytesBE) + +proc decode*(T: type uint64, bytes: seq[byte]): ?!T = + if bytes.len >= sizeof(uint64): + success(uint64.fromBytesBE(bytes)) + else: + failure("Not enough bytes to decode `uint64`") + +proc encode*(i: Natural | enum): seq[byte] = cast[uint64](i).encode +proc decode*(T: typedesc[Natural | enum], bytes: seq[byte]): ?!T = uint64.decode(bytes).map((ui: uint64) => cast[T](ui)) diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim new file mode 100644 index 00000000..e000bb0a --- /dev/null +++ b/codex/stores/repostore/operations.nim @@ -0,0 +1,213 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +import pkg/chronos +import pkg/chronos/futures +import pkg/datastore +import pkg/datastore/typedds +import pkg/libp2p/cid +import pkg/metrics +import pkg/questionable +import pkg/questionable/results + +import ./coders +import ./types +import ../blockstore +import ../keyutils +import ../../blocktype +import ../../clock +import ../../logutils +import ../../merkletree + +logScope: + topics = "codex repostore" + +declareGauge(codex_repostore_blocks, "codex repostore blocks") +declareGauge(codex_repostore_bytes_used, "codex repostore bytes used") +declareGauge(codex_repostore_bytes_reserved, "codex repostore bytes reserved") + +proc putLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof): Future[?!StoreResultKind] {.async.} = + without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: + return failure(err) + + await self.metaDs.modifyGet(key, + proc (maybeCurrMd: ?LeafMetadata): Future[(?LeafMetadata, StoreResultKind)] {.async.} = + var + md: LeafMetadata + res: StoreResultKind + + if currMd =? maybeCurrMd: + md = currMd + res = AlreadyInStore + else: + md = LeafMetadata(blkCid: blkCid, proof: proof) + res = Stored + + (md.some, res) + ) + +proc getLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!LeafMetadata] {.async.} = + without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: + return failure(err) + + without leafMd =? await get[LeafMetadata](self.metaDs, key), err: + if err of DatastoreKeyNotFound: + return failure(newException(BlockNotFoundError, err.msg)) + else: + return failure(err) + + success(leafMd) + +proc updateTotalBlocksCount*(self: RepoStore, plusCount: Natural = 0, minusCount: Natural = 0): Future[?!void] {.async.} = + await self.metaDs.modify(CodexTotalBlocksKey, + proc (maybeCurrCount: ?Natural): Future[?Natural] {.async.} = + let count: Natural = + if currCount =? 
maybeCurrCount: + currCount + plusCount - minusCount + else: + plusCount - minusCount + + self.totalBlocks = count + codex_repostore_blocks.set(count.int64) + count.some + ) + +proc updateQuotaUsage*( + self: RepoStore, + plusUsed: NBytes = 0.NBytes, + minusUsed: NBytes = 0.NBytes, + plusReserved: NBytes = 0.NBytes, + minusReserved: NBytes = 0.NBytes +): Future[?!void] {.async.} = + await self.metaDs.modify(QuotaUsedKey, + proc (maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = + var usage: QuotaUsage + + if currUsage =? maybeCurrUsage: + usage = QuotaUsage(used: currUsage.used + plusUsed - minusUsed, reserved: currUsage.reserved + plusReserved - minusReserved) + else: + usage = QuotaUsage(used: plusUsed - minusUsed, reserved: plusReserved - minusReserved) + + if usage.used + usage.reserved > self.quotaMaxBytes: + raise newException(QuotaNotEnoughError, + "Quota usage would exceed the limit. Used: " & $usage.used & ", reserved: " & + $usage.reserved & ", limit: " & $self.quotaMaxBytes) + else: + self.quotaUsage = usage + codex_repostore_bytes_used.set(usage.used.int64) + codex_repostore_bytes_reserved.set(usage.reserved.int64) + return usage.some + ) + +proc updateBlockMetadata*( + self: RepoStore, + cid: Cid, + plusRefCount: Natural = 0, + minusRefCount: Natural = 0, + minExpiry: SecondsSince1970 = 0 +): Future[?!void] {.async.} = + if cid.isEmpty: + return success() + + without metaKey =? createBlockExpirationMetadataKey(cid), err: + return failure(err) + + await self.metaDs.modify(metaKey, + proc (maybeCurrBlockMd: ?BlockMetadata): Future[?BlockMetadata] {.async.} = + if currBlockMd =? 
maybeCurrBlockMd:
+        BlockMetadata(
+          size: currBlockMd.size,
+          expiry: max(currBlockMd.expiry, minExpiry),
+          refCount: currBlockMd.refCount + plusRefCount - minusRefCount
+        ).some
+      else:
+        raise newException(BlockNotFoundError, "Metadata for block with cid " & $cid & " not found")
+  )
+
+proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Future[?!StoreResult] {.async.} =
+  if blk.isEmpty:
+    return success(StoreResult(kind: AlreadyInStore))
+
+  without metaKey =? createBlockExpirationMetadataKey(blk.cid), err:
+    return failure(err)
+
+  without blkKey =? makePrefixKey(self.postFixLen, blk.cid), err:
+    return failure(err)
+
+  await self.metaDs.modifyGet(metaKey,
+    proc (maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, StoreResult)] {.async.} =
+      var
+        md: BlockMetadata
+        res: StoreResult
+
+      if currMd =? maybeCurrMd:
+        if currMd.size == blk.data.len.NBytes:
+          md = BlockMetadata(size: currMd.size, expiry: max(currMd.expiry, minExpiry), refCount: currMd.refCount)
+          res = StoreResult(kind: AlreadyInStore)
+
+          # making sure that the block actually is stored in the repoDs
+          without hasBlock =? await self.repoDs.has(blkKey), err:
+            raise err
+
+          if not hasBlock:
+            warn "Block metadata is present, but block is absent. Restoring block.", cid = blk.cid
+            if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption:
+              raise err
+        else:
+          raise newException(CatchableError, "Repo already stores a block with the same cid but with a different size, cid: " & $blk.cid)
+      else:
+        md = BlockMetadata(size: blk.data.len.NBytes, expiry: minExpiry, refCount: 0)
+        res = StoreResult(kind: Stored, used: blk.data.len.NBytes)
+        if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption:
+          raise err
+
+      (md.some, res)
+  )
+
+proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low): Future[?!DeleteResult] {.async.} =
+  if cid.isEmpty:
+    return success(DeleteResult(kind: InUse))
+
+  without metaKey =? 
createBlockExpirationMetadataKey(cid), err:
+    return failure(err)
+
+  without blkKey =? makePrefixKey(self.postFixLen, cid), err:
+    return failure(err)
+
+  await self.metaDs.modifyGet(metaKey,
+    proc (maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, DeleteResult)] {.async.} =
+      var
+        maybeMeta: ?BlockMetadata
+        res: DeleteResult
+
+      if currMd =? maybeCurrMd:
+        if currMd.refCount == 0 or currMd.expiry < expiryLimit:
+          maybeMeta = BlockMetadata.none
+          res = DeleteResult(kind: Deleted, released: currMd.size)
+
+          if err =? (await self.repoDs.delete(blkKey)).errorOption:
+            raise err
+        else:
+          maybeMeta = currMd.some
+          res = DeleteResult(kind: InUse)
+      else:
+        maybeMeta = BlockMetadata.none
+        res = DeleteResult(kind: NotFound)
+
+      # making sure that the block actually is removed from the repoDs
+      without hasBlock =? await self.repoDs.has(blkKey), err:
+        raise err
+
+      if hasBlock:
+        warn "Block metadata is absent, but block is present. Removing block.", cid
+        if err =? (await self.repoDs.delete(blkKey)).errorOption:
+          raise err
+
+      (maybeMeta, res)
+  )
diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim
new file mode 100644
index 00000000..7d629131
--- /dev/null
+++ b/codex/stores/repostore/store.nim
@@ -0,0 +1,395 @@
+## Nim-Codex
+## Copyright (c) 2024 Status Research & Development GmbH
+## Licensed under either of
+##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+ +import pkg/chronos +import pkg/chronos/futures +import pkg/datastore +import pkg/datastore/typedds +import pkg/libp2p/[cid, multicodec] +import pkg/questionable +import pkg/questionable/results + +import ./coders +import ./types +import ./operations +import ../blockstore +import ../keyutils +import ../queryiterhelper +import ../../blocktype +import ../../clock +import ../../logutils +import ../../merkletree +import ../../utils + +export blocktype, cid + +logScope: + topics = "codex repostore" + +########################################################### +# BlockStore API +########################################################### + +method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} = + ## Get a block from the blockstore + ## + + logScope: + cid = cid + + if cid.isEmpty: + trace "Empty block, ignoring" + return cid.emptyBlock + + without key =? makePrefixKey(self.postFixLen, cid), err: + trace "Error getting key from provider", err = err.msg + return failure(err) + + without data =? await self.repoDs.get(key), err: + if not (err of DatastoreKeyNotFound): + trace "Error getting block from datastore", err = err.msg, key + return failure(err) + + return failure(newException(BlockNotFoundError, err.msg)) + + trace "Got block for cid", cid + return Block.new(cid, data, verify = true) + +method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} = + without leafMd =? await self.getLeafMetadata(treeCid, index), err: + return failure(err) + + without blk =? await self.getBlock(leafMd.blkCid), err: + return failure(err) + + success((blk, leafMd.proof)) + +method getBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} = + without leafMd =? 
await self.getLeafMetadata(treeCid, index), err:
+    return failure(err)
+
+  await self.getBlock(leafMd.blkCid)
+
+method getBlock*(self: RepoStore, address: BlockAddress): Future[?!Block] =
+  ## Get a block from the blockstore
+  ##
+
+  if address.leaf:
+    self.getBlock(address.treeCid, address.index)
+  else:
+    self.getBlock(address.cid)
+
+method ensureExpiry*(
+    self: RepoStore,
+    cid: Cid,
+    expiry: SecondsSince1970
+): Future[?!void] {.async.} =
+  ## Ensure that block's associated expiry is at least given timestamp
+  ## If the current expiry is lower, then it is updated to the given one, otherwise it is left intact
+  ##
+
+  if expiry <= 0:
+    return failure(newException(ValueError, "Expiry timestamp must be larger than zero"))
+
+  await self.updateBlockMetadata(cid, minExpiry = expiry)
+
+method ensureExpiry*(
+    self: RepoStore,
+    treeCid: Cid,
+    index: Natural,
+    expiry: SecondsSince1970
+): Future[?!void] {.async.} =
+  ## Ensure that block's associated expiry is at least given timestamp
+  ## If the current expiry is lower, then it is updated to the given one, otherwise it is left intact
+  ##
+
+  without leafMd =? await self.getLeafMetadata(treeCid, index), err:
+    return failure(err)
+
+  await self.ensureExpiry(leafMd.blkCid, expiry)
+
+method putCidAndProof*(
+    self: RepoStore,
+    treeCid: Cid,
+    index: Natural,
+    blkCid: Cid,
+    proof: CodexProof
+): Future[?!void] {.async.} =
+  ## Put a block to the blockstore
+  ##
+
+  logScope:
+    treeCid = treeCid
+    index = index
+    blkCid = blkCid
+
+  trace "Storing LeafMetadata"
+
+  without res =? await self.putLeafMetadata(treeCid, index, blkCid, proof), err:
+    return failure(err)
+
+  if blkCid.mcodec == BlockCodec:
+    if res == Stored:
+      if err =? 
(await self.updateBlockMetadata(blkCid, plusRefCount = 1)).errorOption: + return failure(err) + trace "Leaf metadata stored, block refCount incremented" + else: + trace "Leaf metadata already exists" + + return success() + +method getCidAndProof*( + self: RepoStore, + treeCid: Cid, + index: Natural +): Future[?!(Cid, CodexProof)] {.async.} = + without leafMd =? await self.getLeafMetadata(treeCid, index), err: + return failure(err) + + success((leafMd.blkCid, leafMd.proof)) + +method getCid*( + self: RepoStore, + treeCid: Cid, + index: Natural +): Future[?!Cid] {.async.} = + without leafMd =? await self.getLeafMetadata(treeCid, index), err: + return failure(err) + + success(leafMd.blkCid) + +method putBlock*( + self: RepoStore, + blk: Block, + ttl = Duration.none): Future[?!void] {.async.} = + ## Put a block to the blockstore + ## + + logScope: + cid = blk.cid + + let expiry = self.clock.now() + (ttl |? self.blockTtl).seconds + + without res =? await self.storeBlock(blk, expiry), err: + return failure(err) + + if res.kind == Stored: + trace "Block Stored" + if err =? (await self.updateQuotaUsage(plusUsed = res.used)).errorOption: + # rollback changes + without delRes =? await self.tryDeleteBlock(blk.cid), err: + return failure(err) + return failure(err) + + if err =? (await self.updateTotalBlocksCount(plusCount = 1)).errorOption: + return failure(err) + else: + trace "Block already exists" + + return success() + +method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = + ## Delete a block from the blockstore when block refCount is 0 or block is expired + ## + + logScope: + cid = cid + + trace "Attempting to delete a block" + + without res =? await self.tryDeleteBlock(cid, self.clock.now()), err: + return failure(err) + + if res.kind == Deleted: + trace "Block deleted" + if err =? (await self.updateTotalBlocksCount(minusCount = 1)).errorOption: + return failure(err) + + if err =? 
(await self.updateQuotaUsage(minusUsed = res.released)).errorOption: + return failure(err) + elif res.kind == InUse: + trace "Block in use, refCount > 0 and not expired" + else: + trace "Block not found in store" + + return success() + +method delBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} = + without leafMd =? await self.getLeafMetadata(treeCid, index), err: + if err of BlockNotFoundError: + return success() + else: + return failure(err) + + if err =? (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: + if not (err of BlockNotFoundError): + return failure(err) + + await self.delBlock(leafMd.blkCid) # safe delete, only if refCount == 0 + +method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = + ## Check if the block exists in the blockstore + ## + + logScope: + cid = cid + + if cid.isEmpty: + trace "Empty block, ignoring" + return success true + + without key =? makePrefixKey(self.postFixLen, cid), err: + trace "Error getting key from provider", err = err.msg + return failure(err) + + return await self.repoDs.has(key) + +method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} = + without leafMd =? await self.getLeafMetadata(treeCid, index), err: + if err of BlockNotFoundError: + return success(false) + else: + return failure(err) + + await self.hasBlock(leafMd.blkCid) + +method listBlocks*( + self: RepoStore, + blockType = BlockType.Manifest +): Future[?!AsyncIter[?Cid]] {.async.} = + ## Get the list of blocks in the RepoStore. + ## This is an intensive operation + ## + + var + iter = AsyncIter[?Cid]() + + let key = + case blockType: + of BlockType.Manifest: CodexManifestKey + of BlockType.Block: CodexBlocksKey + of BlockType.Both: CodexRepoKey + + let query = Query.init(key, value=false) + without queryIter =? 
(await self.repoDs.query(query)), err: + trace "Error querying cids in repo", blockType, err = err.msg + return failure(err) + + proc next(): Future[?Cid] {.async.} = + await idleAsync() + if queryIter.finished: + iter.finish + else: + if pair =? (await queryIter.next()) and cid =? pair.key: + doAssert pair.data.len == 0 + trace "Retrieved record from repo", cid + return Cid.init(cid.value).option + else: + return Cid.none + + iter.next = next + return success iter + +proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query = + let queryKey = ? createBlockExpirationMetadataQueryKey() + success Query.init(queryKey, offset = offset, limit = maxNumber) + +method getBlockExpirations*( + self: RepoStore, + maxNumber: int, + offset: int): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = + ## Get iterator with block expirations + ## + + without beQuery =? createBlockExpirationQuery(maxNumber, offset), err: + error "Unable to format block expirations query", err = err.msg + return failure(err) + + without queryIter =? await query[BlockMetadata](self.metaDs, beQuery), err: + error "Unable to execute block expirations query", err = err.msg + return failure(err) + + without asyncQueryIter =? await queryIter.toAsyncIter(), err: + error "Unable to convert QueryIter to AsyncIter", err = err.msg + return failure(err) + + let + filteredIter = await asyncQueryIter.filterSuccess() + blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, + proc (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = + without cid =? Cid.init(kv.key.value).mapFailure, err: + error "Failed decoding cid", err = err.msg + return BlockExpiration.none + + BlockExpiration(cid: cid, expiry: kv.value.expiry).some + ) + + success(blockExpIter) + +method close*(self: RepoStore): Future[void] {.async.} = + ## Close the blockstore, cleaning up resources managed by it. 
+ ## For some implementations this may be a no-op + ## + + trace "Closing repostore" + + if not self.metaDs.isNil: + (await self.metaDs.close()).expect("Should meta datastore") + + if not self.repoDs.isNil: + (await self.repoDs.close()).expect("Should repo datastore") + +########################################################### +# RepoStore procs +########################################################### + +proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = + ## Reserve bytes + ## + + trace "Reserving bytes", bytes + + await self.updateQuotaUsage(plusReserved = bytes) + +proc release*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = + ## Release bytes + ## + + trace "Releasing bytes", bytes + + await self.updateQuotaUsage(minusReserved = bytes) + +proc start*(self: RepoStore): Future[void] {.async.} = + ## Start repo + ## + + if self.started: + trace "Repo already started" + return + + trace "Starting rep" + if err =? (await self.updateTotalBlocksCount()).errorOption: + raise newException(CodexError, err.msg) + + if err =? (await self.updateQuotaUsage()).errorOption: + raise newException(CodexError, err.msg) + + self.started = true + +proc stop*(self: RepoStore): Future[void] {.async.} = + ## Stop repo + ## + if not self.started: + trace "Repo is not started" + return + + trace "Stopping repo" + await self.close() + + self.started = false diff --git a/codex/stores/repostore/types.nim b/codex/stores/repostore/types.nim new file mode 100644 index 00000000..4338e63a --- /dev/null +++ b/codex/stores/repostore/types.nim @@ -0,0 +1,107 @@ +## Nim-Codex +## Copyright (c) 2024 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +import pkg/chronos +import pkg/datastore +import pkg/datastore/typedds +import pkg/libp2p/cid + +import ../blockstore +import ../../clock +import ../../errors +import ../../merkletree +import ../../systemclock +import ../../units + +const + DefaultBlockTtl* = 24.hours + DefaultQuotaBytes* = 8.GiBs + +type + QuotaNotEnoughError* = object of CodexError + + RepoStore* = ref object of BlockStore + postFixLen*: int + repoDs*: Datastore + metaDs*: TypedDatastore + clock*: Clock + quotaMaxBytes*: NBytes + quotaUsage*: QuotaUsage + totalBlocks*: Natural + blockTtl*: Duration + started*: bool + + QuotaUsage* {.serialize.} = object + used*: NBytes + reserved*: NBytes + + BlockMetadata* {.serialize.} = object + expiry*: SecondsSince1970 + size*: NBytes + refCount*: Natural + + LeafMetadata* {.serialize.} = object + blkCid*: Cid + proof*: CodexProof + + BlockExpiration* {.serialize.} = object + cid*: Cid + expiry*: SecondsSince1970 + + DeleteResultKind* {.serialize.} = enum + Deleted = 0, # block removed from store + InUse = 1, # block not removed, refCount > 0 and not expired + NotFound = 2 # block not found in store + + DeleteResult* {.serialize.} = object + kind*: DeleteResultKind + released*: NBytes + + StoreResultKind* {.serialize.} = enum + Stored = 0, # new block stored + AlreadyInStore = 1 # block already in store + + StoreResult* {.serialize.} = object + kind*: StoreResultKind + used*: NBytes + +func quotaUsedBytes*(self: RepoStore): NBytes = + self.quotaUsage.used + +func quotaReservedBytes*(self: RepoStore): NBytes = + self.quotaUsage.reserved + +func totalUsed*(self: RepoStore): NBytes = + (self.quotaUsedBytes + self.quotaReservedBytes) + +func available*(self: RepoStore): NBytes = + return self.quotaMaxBytes - self.totalUsed + +func available*(self: RepoStore, bytes: NBytes): bool = + return bytes < self.available() + +func new*( + T: type RepoStore, + repoDs: Datastore, + metaDs: Datastore, + clock: Clock = SystemClock.new(), + postFixLen = 2, + quotaMaxBytes 
= DefaultQuotaBytes, + blockTtl = DefaultBlockTtl +): RepoStore = + ## Create new instance of a RepoStore + ## + RepoStore( + repoDs: repoDs, + metaDs: TypedDatastore.init(metaDs), + clock: clock, + postFixLen: postFixLen, + quotaMaxBytes: quotaMaxBytes, + blockTtl: blockTtl + ) diff --git a/codex/stores/treehelper.nim b/codex/stores/treehelper.nim new file mode 100644 index 00000000..485cbfc2 --- /dev/null +++ b/codex/stores/treehelper.nim @@ -0,0 +1,50 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import pkg/upraises + +push: {.upraises: [].} + +import std/sugar +import pkg/chronos +import pkg/chronos/futures +import pkg/metrics +import pkg/questionable +import pkg/questionable/results + +import ./blockstore +import ../utils/asynciter +import ../merkletree + +proc putSomeProofs*(store: BlockStore, tree: CodexTree, iter: Iter[int]): Future[?!void] {.async.} = + without treeCid =? tree.rootCid, err: + return failure(err) + + for i in iter: + if i notin 0.. i.ord)) + +proc putAllProofs*(store: BlockStore, tree: CodexTree): Future[?!void] = + store.putSomeProofs(tree, Iter[int].new(0..= self.size method readOnce*( - self: StoreStream, - pbytes: pointer, - nbytes: int): Future[int] {.async.} = + self: StoreStream, + pbytes: pointer, + nbytes: int +): Future[int] {.async.} = ## Read `nbytes` from current position in the StoreStream into output buffer pointed by `pbytes`. ## Return how many bytes were actually read before EOF was encountered. ## Raise exception if we are already at EOF. 
+ ## - trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.len if self.atEof: raise newLPStreamEOFError() @@ -80,21 +93,24 @@ method readOnce*( # Compute from the current stream position `self.offset` the block num/offset to read # Compute how many bytes to read from this block let - blockNum = self.offset div self.manifest.blockSize - blockOffset = self.offset mod self.manifest.blockSize - readBytes = min([self.size - self.offset, nbytes - read, self.manifest.blockSize - blockOffset]) + blockNum = self.offset div self.manifest.blockSize.int + blockOffset = self.offset mod self.manifest.blockSize.int + readBytes = min([self.size - self.offset, + nbytes - read, + self.manifest.blockSize.int - blockOffset]) + address = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum) # Read contents of block `blockNum` - without blk =? await self.store.getBlock(self.manifest[blockNum]), error: + without blk =? await self.store.getBlock(address), error: raise newLPStreamReadError(error) - trace "Reading bytes from store stream", blockNum, cid = blk.cid, bytes = readBytes, blockOffset + trace "Reading bytes from store stream", manifestCid = self.manifest.cid.get(), numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, bytes = readBytes, blockOffset # Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf if blk.isEmpty: zeroMem(pbytes.offset(read), readBytes) else: - copyMem(pbytes.offset(read), blk.data[blockOffset].addr, readBytes) + copyMem(pbytes.offset(read), blk.data[blockOffset].unsafeAddr, readBytes) # Update current positions in the stream and outbuf self.offset += readBytes diff --git a/codex/systemclock.nim b/codex/systemclock.nim new file mode 100644 index 00000000..25ac4216 --- /dev/null +++ b/codex/systemclock.nim @@ -0,0 +1,10 @@ +import std/times +import pkg/upraises +import ./clock + +type + SystemClock* = ref object of Clock + +method now*(clock: SystemClock): 
SecondsSince1970 {.upraises: [].} = + let now = times.now().utc + now.toTime().toUnix() diff --git a/codex/units.nim b/codex/units.nim new file mode 100644 index 00000000..52f44328 --- /dev/null +++ b/codex/units.nim @@ -0,0 +1,77 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. +## + +import std/hashes +import std/strutils + +import pkg/upraises + +import ./logutils + +type + NBytes* = distinct Natural + +template basicMaths(T: untyped) = + proc `+` *(x: T, y: static[int]): T = T(`+`(x.Natural, y.Natural)) + proc `-` *(x: T, y: static[int]): T = T(`-`(x.Natural, y.Natural)) + proc `*` *(x: T, y: static[int]): T = T(`*`(x.Natural, y.Natural)) + proc `+` *(x, y: T): T = T(`+`(x.Natural, y.Natural)) + proc `-` *(x, y: T): T = T(`-`(x.Natural, y.Natural)) + proc `*` *(x, y: T): T = T(`*`(x.Natural, y.Natural)) + proc `<` *(x, y: T): bool {.borrow.} + proc `<=` *(x, y: T): bool {.borrow.} + proc `==` *(x, y: T): bool {.borrow.} + proc `+=` *(x: var T, y: T) {.borrow.} + proc `-=` *(x: var T, y: T) {.borrow.} + proc `hash` *(x: T): Hash {.borrow.} + +template divMaths(T: untyped) = + proc `mod` *(x, y: T): T = T(`mod`(x.Natural, y.Natural)) + proc `div` *(x, y: T): Natural = `div`(x.Natural, y.Natural) + # proc `/` *(x, y: T): Natural = `/`(x.Natural, y.Natural) + +basicMaths(NBytes) +divMaths(NBytes) + +proc `$`*(ts: NBytes): string = $(int(ts)) & "'NByte" +proc `'nb`*(n: string): NBytes = parseInt(n).NBytes + +logutils.formatIt(NBytes): $it + +const + KiB = 1024.NBytes # ByteSz, 1 kibibyte = 1,024 ByteSz + MiB = KiB * 1024 # ByteSz, 1 mebibyte = 1,048,576 ByteSz + GiB = MiB * 1024 # ByteSz, 1 gibibyte = 1,073,741,824 ByteSz + +proc KiBs*(v: Natural): NBytes = v.NBytes * KiB +proc 
MiBs*(v: Natural): NBytes = v.NBytes * MiB +proc GiBs*(v: Natural): NBytes = v.NBytes * GiB + +func divUp*[T: NBytes](a, b : T): int = + ## Division with result rounded up (rather than truncated as in 'div') + assert(b != T(0)) + if a==T(0): int(0) else: int( ((a - T(1)) div b) + 1 ) + +when isMainModule: + + import unittest2 + + suite "maths": + test "basics": + let x = 5.NBytes + let y = 10.NBytes + check x + y == 15.NBytes + expect RangeDefect: + check x - y == 10.NBytes + check y - x == 5.NBytes + check x * y == 50.NBytes + check y div x == 2 + check y > x == true + check y <= x == false diff --git a/codex/utils.nim b/codex/utils.nim index e5f21ef2..17dd924f 100644 --- a/codex/utils.nim +++ b/codex/utils.nim @@ -1,15 +1,96 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. +## + +import std/enumerate +import std/parseutils +import std/options + +import pkg/chronos + import ./utils/asyncheapqueue import ./utils/fileutils +import ./utils/asynciter -export asyncheapqueue, fileutils +export asyncheapqueue, fileutils, asynciter, chronos -func divUp*[T](a, b : T): T = +func divUp*[T: SomeInteger](a, b : T): T = ## Division with result rounded up (rather than truncated as in 'div') - assert(b != 0) - if a==0: 0 else: ((a - 1) div b) + 1 + assert(b != T(0)) + if a==T(0): T(0) else: ((a - T(1)) div b) + T(1) func roundUp*[T](a, b : T): T = ## Round up 'a' to the next value divisible by 'b' divUp(a,b) * b +proc orElse*[A](a, b: Option[A]): Option[A] = + if (a.isSome()): + a + else: + b + +template findIt*(s, pred: untyped): untyped = + ## Returns the index of the first object matching a predicate, or -1 if no + ## object matches it. 
+ runnableExamples: + type MyType = object + att: int + + var s = @[MyType(att: 1), MyType(att: 2), MyType(att: 3)] + doAssert s.findIt(it.att == 2) == 1 + doAssert s.findIt(it.att == 4) == -1 + + var index = -1 + for i, it {.inject.} in enumerate(items(s)): + if pred: + index = i + break + index + +when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine + const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'} + + func toLowerAscii(c: char): char = + if c in {'A'..'Z'}: char(uint8(c) xor 0b0010_0000'u8) else: c + + func parseDuration*(s: string, size: var Duration): int = + ## Parse a size qualified by simple time into `Duration`. + ## + runnableExamples: + var res: Duration # caller must still know if 'b' refers to bytes|bits + doAssert parseDuration("10H", res) == 3 + doAssert res == initDuration(hours=10) + doAssert parseDuration("64m", res) == 6 + doAssert res == initDuration(minutes=64) + doAssert parseDuration("7m/block", res) == 2 # '/' stops parse + doAssert res == initDuration(minutes=7) # 1 shl 30, forced binary metric + doAssert parseDuration("3d", res) == 2 # '/' stops parse + doAssert res == initDuration(days=3) # 1 shl 30, forced binary metric + + const prefix = "s" & "mhdw" # byte|bit & lowCase metric-ish prefixes + const timeScale = [1.0, 60.0, 3600.0, 86_400.0, 604_800.0] + + var number: float + var scale = 1.0 + result = parseFloat(s, number) + if number < 0: # While parseFloat accepts negatives .. + result = 0 #.. we do not since sizes cannot be < 0 + else: + let start = result # Save spot to maybe unwind white to EOS + while result < s.len and s[result] in Whitespace: + inc result + if result < s.len: # Illegal starting char => unity + if (let si = prefix.find(s[result].toLowerAscii); si >= 0): + inc result # Now parse the scale + scale = timeScale[si] + else: # Unwind result advancement when there.. + result = start #..is no unit to the end of `s`. 
+ var sizeF = number * scale + 0.5 # Saturate to int64.high when too big + size = seconds(int(sizeF)) diff --git a/codex/utils/addrutils.nim b/codex/utils/addrutils.nim index 6ae00e39..f044581a 100644 --- a/codex/utils/addrutils.nim +++ b/codex/utils/addrutils.nim @@ -17,9 +17,10 @@ import pkg/libp2p import pkg/stew/shims/net func remapAddr*( - address: MultiAddress, - ip: Option[ValidIpAddress] = ValidIpAddress.none, - port: Option[Port] = Port.none): MultiAddress = + address: MultiAddress, + ip: Option[ValidIpAddress] = ValidIpAddress.none, + port: Option[Port] = Port.none +): MultiAddress = ## Remap addresses to new IP and/or Port ## diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index b85f7de7..e7d7edad 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -34,8 +34,9 @@ type Empty, Full proc newAsyncHeapQueue*[T]( - maxsize: int = 0, - queueType: QueueType = QueueType.Min): AsyncHeapQueue[T] = + maxsize: int = 0, + queueType: QueueType = QueueType.Min +): AsyncHeapQueue[T] = ## Creates a new asynchronous queue ``AsyncHeapQueue``. ## @@ -58,7 +59,7 @@ proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} = break if i > 0: - waiters.delete(0, i - 1) + waiters.delete(0..(i-1)) proc heapCmp[T](x, y: T, max: bool = false): bool {.inline.} = if max: @@ -282,7 +283,7 @@ proc len*[T](heap: AsyncHeapQueue[T]): int {.inline.} = proc size*[T](heap: AsyncHeapQueue[T]): int {.inline.} = ## Return the maximum number of elements in ``heap``. - len(heap.maxsize) + heap.maxsize proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural) : T {.inline.} = ## Access the i-th element of ``heap`` by order from first to last. 
diff --git a/codex/utils/asynciter.nim b/codex/utils/asynciter.nim new file mode 100644 index 00000000..a1779552 --- /dev/null +++ b/codex/utils/asynciter.nim @@ -0,0 +1,165 @@ +import std/sugar + +import pkg/questionable +import pkg/chronos + +import ./iter + +export iter + +## AsyncIter[T] is similar to `Iter[Future[T]]` with addition of methods specific to asynchronous processing +## + +type + AsyncIter*[T] = ref object + finished: bool + next*: GenNext[Future[T]] + +proc finish*[T](self: AsyncIter[T]): void = + self.finished = true + +proc finished*[T](self: AsyncIter[T]): bool = + self.finished + +iterator items*[T](self: AsyncIter[T]): Future[T] = + while not self.finished: + yield self.next() + +iterator pairs*[T](self: AsyncIter[T]): tuple[key: int, val: Future[T]] {.inline.} = + var i = 0 + while not self.finished: + yield (i, self.next()) + inc(i) + +proc map*[T, U](fut: Future[T], fn: Function[T, U]): Future[U] {.async.} = + let t = await fut + fn(t) + +proc flatMap*[T, U](fut: Future[T], fn: Function[T, Future[U]]): Future[U] {.async.} = + let t = await fut + await fn(t) + +proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFinished, finishOnErr: bool = true): AsyncIter[T] = + ## Creates a new Iter using elements returned by supplier function `genNext`. + ## Iter is finished whenever `isFinished` returns true. 
+ ## + + var iter = AsyncIter[T]() + + proc next(): Future[T] {.async.} = + if not iter.finished: + var item: T + try: + item = await genNext() + except CancelledError as err: + iter.finish + raise err + except CatchableError as err: + if finishOnErr or isFinished(): + iter.finish + raise err + + if isFinished(): + iter.finish + return item + else: + raise newException(CatchableError, "AsyncIter is finished but next item was requested") + + if isFinished(): + iter.finish + + iter.next = next + return iter + +proc mapAsync*[T, U](iter: Iter[T], fn: Function[T, Future[U]]): AsyncIter[U] = + AsyncIter[U].new( + genNext = () => fn(iter.next()), + isFinished = () => iter.finished() + ) + +proc new*[U, V: Ordinal](_: type AsyncIter[U], slice: HSlice[U, V]): AsyncIter[U] = + ## Creates new Iter from a slice + ## + + let iter = Iter[U].new(slice) + mapAsync[U, U](iter, + proc (i: U): Future[U] {.async.} = + i + ) + +proc new*[U, V, S: Ordinal](_: type AsyncIter[U], a: U, b: V, step: S = 1): AsyncIter[U] = + ## Creates new Iter in range a..b with specified step (default 1) + ## + + let iter = Iter[U].new(a, b, step) + mapAsync[U, U](iter, + proc (i: U): Future[U] {.async.} = + i + ) + +proc empty*[T](_: type AsyncIter[T]): AsyncIter[T] = + ## Creates an empty AsyncIter + ## + + proc genNext(): Future[T] {.raises: [CatchableError].} = + raise newException(CatchableError, "Next item requested from an empty AsyncIter") + proc isFinished(): bool = true + + AsyncIter[T].new(genNext, isFinished) + +proc map*[T, U](iter: AsyncIter[T], fn: Function[T, Future[U]]): AsyncIter[U] = + AsyncIter[U].new( + genNext = () => iter.next().flatMap(fn), + isFinished = () => iter.finished + ) + +proc mapFilter*[T, U](iter: AsyncIter[T], mapPredicate: Function[T, Future[Option[U]]]): Future[AsyncIter[U]] {.async.} = + var nextFutU: Option[Future[U]] + + proc tryFetch(): Future[void] {.async.} = + nextFutU = Future[U].none + while not iter.finished: + let futT = iter.next() + try: + if u =? 
await futT.flatMap(mapPredicate): + let futU = newFuture[U]("AsyncIter.mapFilterAsync") + futU.complete(u) + nextFutU = some(futU) + break + except CancelledError as err: + raise err + except CatchableError as err: + let errFut = newFuture[U]("AsyncIter.mapFilterAsync") + errFut.fail(err) + nextFutU = some(errFut) + break + + proc genNext(): Future[U] {.async.} = + let futU = nextFutU.unsafeGet + await tryFetch() + await futU + + proc isFinished(): bool = + nextFutU.isNone + + await tryFetch() + AsyncIter[U].new(genNext, isFinished) + +proc filter*[T](iter: AsyncIter[T], predicate: Function[T, Future[bool]]): Future[AsyncIter[T]] {.async.} = + proc wrappedPredicate(t: T): Future[Option[T]] {.async.} = + if await predicate(t): + some(t) + else: + T.none + + await mapFilter[T, T](iter, wrappedPredicate) + +proc delayBy*[T](iter: AsyncIter[T], d: Duration): AsyncIter[T] = + ## Delays emitting each item by given duration + ## + + map[T, T](iter, + proc (t: T): Future[T] {.async.} = + await sleepAsync(d) + t + ) diff --git a/codex/utils/asyncspawn.nim b/codex/utils/asyncspawn.nim new file mode 100644 index 00000000..6717e5e1 --- /dev/null +++ b/codex/utils/asyncspawn.nim @@ -0,0 +1,10 @@ +import pkg/chronos + +proc asyncSpawn*(future: Future[void], ignore: type CatchableError) = + proc ignoringError {.async.} = + try: + await future + except ignore: + discard + asyncSpawn ignoringError() + diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim new file mode 100644 index 00000000..9aeb3eab --- /dev/null +++ b/codex/utils/asyncstatemachine.nim @@ -0,0 +1,112 @@ +import std/sugar +import pkg/questionable +import pkg/chronos +import pkg/upraises +import ../logutils +import ./then +import ./trackedfutures + +push: {.upraises:[].} + +type + Machine* = ref object of RootObj + state: State + running: Future[void] + scheduled: AsyncQueue[Event] + started: bool + trackedFutures: TrackedFutures + State* = ref object of RootObj + Query*[T] = proc(state: 
State): T + Event* = proc(state: State): ?State {.gcsafe, upraises:[].} + +logScope: + topics = "statemachine" + +proc new*[T: Machine](_: type T): T = + T(trackedFutures: TrackedFutures.new()) + +method `$`*(state: State): string {.base.} = + raiseAssert "not implemented" + +proc transition(_: type Event, previous, next: State): Event = + return proc (state: State): ?State = + if state == previous: + return some next + +proc query*[T](machine: Machine, query: Query[T]): ?T = + if machine.state.isNil: + none T + else: + some query(machine.state) + +proc schedule*(machine: Machine, event: Event) = + if not machine.started: + return + + try: + machine.scheduled.putNoWait(event) + except AsyncQueueFullError: + raiseAssert "unlimited queue is full?!" + +method run*(state: State, machine: Machine): Future[?State] {.base, async.} = + discard + +method onError*(state: State, error: ref CatchableError): ?State {.base.} = + raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error) + +proc onError(machine: Machine, error: ref CatchableError): Event = + return proc (state: State): ?State = + state.onError(error) + +proc run(machine: Machine, state: State) {.async.} = + try: + if next =? await state.run(machine): + machine.schedule(Event.transition(state, next)) + except CancelledError: + discard + +proc scheduler(machine: Machine) {.async.} = + var running: Future[void] + try: + while machine.started: + let event = await machine.scheduled.get().track(machine) + if next =? 
event(machine.state): + if not running.isNil and not running.finished: + await running.cancelAndWait() + let fromState = if machine.state.isNil: "" else: $machine.state + machine.state = next + debug "enter state", state = machine.state, fromState + running = machine.run(machine.state) + running + .track(machine) + .catch((err: ref CatchableError) => + machine.schedule(machine.onError(err)) + ) + except CancelledError: + discard + +proc start*(machine: Machine, initialState: State) = + if machine.started: + return + + if machine.scheduled.isNil: + machine.scheduled = newAsyncQueue[Event]() + + machine.started = true + machine.scheduler() + .track(machine) + .catch((err: ref CatchableError) => + error("Error in scheduler", error = err.msg) + ) + machine.schedule(Event.transition(machine.state, initialState)) + +proc stop*(machine: Machine) {.async.} = + if not machine.started: + return + + trace "stopping state machine" + + machine.started = false + await machine.trackedFutures.cancelTracked() + + machine.state = nil diff --git a/codex/utils/digest.nim b/codex/utils/digest.nim new file mode 100644 index 00000000..4b3e68bf --- /dev/null +++ b/codex/utils/digest.nim @@ -0,0 +1,8 @@ + +from pkg/libp2p import MultiHash + +func digestBytes*(mhash: MultiHash): seq[byte] = + ## Extract hash digestBytes + ## + + mhash.data.buffer[mhash.dpos.. 0 and i > b) or + (step < 0 and i < b) + + Iter[U].new(genNext, isFinished) + +proc new*[U, V: Ordinal](_: type Iter[U], slice: HSlice[U, V]): Iter[U] = + ## Creates a new Iter from a slice + ## + + Iter[U].new(slice.a.int, slice.b.int, 1) + +proc new*[T](_: type Iter[T], items: seq[T]): Iter[T] = + ## Creates a new Iter from a sequence + ## + + Iter[int].new(0.. 
items[i]) + +proc empty*[T](_: type Iter[T]): Iter[T] = + ## Creates an empty Iter + ## + + proc genNext(): T {.raises: [CatchableError].} = + raise newException(CatchableError, "Next item requested from an empty Iter") + proc isFinished(): bool = true + + Iter[T].new(genNext, isFinished) + +proc map*[T, U](iter: Iter[T], fn: Function[T, U]): Iter[U] = + Iter[U].new( + genNext = () => fn(iter.next()), + isFinished = () => iter.finished + ) + +proc mapFilter*[T, U](iter: Iter[T], mapPredicate: Function[T, Option[U]]): Iter[U] = + var nextUOrErr: Option[Result[U, ref CatchableError]] + + proc tryFetch(): void = + nextUOrErr = Result[U, ref CatchableError].none + while not iter.finished: + try: + let t = iter.next() + if u =? mapPredicate(t): + nextUOrErr = some(success(u)) + break + except CatchableError as err: + nextUOrErr = some(U.failure(err)) + + proc genNext(): U {.raises: [CatchableError].} = + # at this point nextUOrErr should always be some(..) + without u =? nextUOrErr.unsafeGet, err: + raise err + + tryFetch() + return u + + proc isFinished(): bool = + nextUOrErr.isNone + + tryFetch() + Iter[U].new(genNext, isFinished) + +proc filter*[T](iter: Iter[T], predicate: Function[T, bool]): Iter[T] = + proc wrappedPredicate(t: T): Option[T] = + if predicate(t): + some(t) + else: + T.none + + mapFilter[T, T](iter, wrappedPredicate) diff --git a/codex/utils/json.nim b/codex/utils/json.nim new file mode 100644 index 00000000..4113b632 --- /dev/null +++ b/codex/utils/json.nim @@ -0,0 +1,34 @@ + +import std/options +import std/typetraits +from pkg/ethers import Address +from pkg/libp2p import Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$` +import pkg/contractabi +import pkg/codexdht/discv5/node as dn +import pkg/serde/json +import pkg/questionable/results +import ../errors + +export json + + +proc fromJson*( + _: type Cid, + json: JsonNode +): ?!Cid = + expectJsonKind(Cid, JString, json) + Cid.init(json.str).mapFailure + +func `%`*(cid: Cid): 
JsonNode = % $cid + +func `%`*(obj: PeerId): JsonNode = % $obj + +func `%`*(obj: SignedPeerRecord): JsonNode = % $obj + +func `%`*(obj: dn.Address): JsonNode = % $obj + +func `%`*(obj: AddressInfo): JsonNode = % $obj.address + +func `%`*(obj: MultiAddress): JsonNode = % $obj + +func `%`*(address: ethers.Address): JsonNode = % $address diff --git a/codex/utils/keyutils.nim b/codex/utils/keyutils.nim index 2280da75..c7f76263 100644 --- a/codex/utils/keyutils.nim +++ b/codex/utils/keyutils.nim @@ -1,4 +1,3 @@ - ## Nim-Codex ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of @@ -11,19 +10,15 @@ import pkg/upraises push: {.upraises: [].} -import std/os - -import pkg/chronicles import pkg/questionable/results -import pkg/libp2p +import pkg/libp2p/crypto/crypto import ./fileutils -import ../conf import ../errors +import ../logutils import ../rng -const - SafePermissions = {UserRead, UserWrite} +export crypto type CodexKeyError = object of CodexError @@ -45,6 +40,5 @@ proc setupKey*(path: string): ?!PrivateKey = return failure newException( CodexKeyUnsafeError, "The network private key file is not safe") - return PrivateKey.init( - ? path.readAllBytes().mapFailure(CodexKeyError)) - .mapFailure(CodexKeyError) + let kb = ? path.readAllBytes().mapFailure(CodexKeyError) + return PrivateKey.init(kb).mapFailure(CodexKeyError) diff --git a/codex/utils/optionalcast.nim b/codex/utils/optionalcast.nim deleted file mode 100644 index 15830769..00000000 --- a/codex/utils/optionalcast.nim +++ /dev/null @@ -1,16 +0,0 @@ -import pkg/questionable -import pkg/questionable/operators - -export questionable - -proc `as`*[T](value: T, U: type): ?U = - ## Casts a value to another type, returns an Option. - ## When the cast succeeds, the option will contain the casted value. - ## When the cast fails, the option will have no value. 
- when value is U: - return some value - elif value is ref object: - if value of U: - return some U(value) - -Option.liftBinary `as` diff --git a/codex/utils/options.nim b/codex/utils/options.nim new file mode 100644 index 00000000..115af782 --- /dev/null +++ b/codex/utils/options.nim @@ -0,0 +1,61 @@ +import macros +import strutils +import pkg/questionable +import pkg/questionable/operators + +export questionable + +proc `as`*[T](value: T, U: type): ?U = + ## Casts a value to another type, returns an Option. + ## When the cast succeeds, the option will contain the casted value. + ## When the cast fails, the option will have no value. + when value is U: + return some value + elif value is ref object: + if value of U: + return some U(value) + +Option.liftBinary `as` + +# Template that wraps type with `Option[]` only if it is already not `Option` type +template WrapOption*(input: untyped): type = + when input is Option: + input + else: + Option[input] + + +macro createType(t: typedesc): untyped = + var objectType = getType(t) + + # Work around for https://github.com/nim-lang/Nim/issues/23112 + while objectType.kind == nnkBracketExpr and objectType[0].eqIdent"typeDesc": + objectType = getType(objectType[1]) + + expectKind(objectType, NimNodeKind.nnkObjectTy) + var fields = nnkRecList.newTree() + + # Generates the list of fields that are wrapped in `Option[T]`. + # Technically wrapped with `WrapOption` which is template used to prevent + # re-wrapping already filed which is `Option[T]`. + for field in objectType[2]: + let fieldType = getTypeInst(field) + let newFieldNode = + nnkIdentDefs.newTree(ident($field), nnkCall.newTree(ident("WrapOption"), fieldType), newEmptyNode()) + + fields.add(newFieldNode) + + # Creates new object type T with the fields lists from steps above. 
+ let tSym = genSym(nskType, "T") + nnkStmtList.newTree( + nnkTypeSection.newTree( + nnkTypeDef.newTree(tSym, newEmptyNode(), nnkObjectTy.newTree(newEmptyNode(), newEmptyNode(), fields)) + ), + tSym + ) + +template Optionalize*(t: typed): untyped = + ## Takes object type and wraps all the first level fields into + ## Option type unless it is already Option type. + createType(t) + diff --git a/codex/utils/poseidon2digest.nim b/codex/utils/poseidon2digest.nim new file mode 100644 index 00000000..efdb3c6a --- /dev/null +++ b/codex/utils/poseidon2digest.nim @@ -0,0 +1,79 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import pkg/poseidon2 +import pkg/questionable/results +import pkg/libp2p/multihash +import pkg/stew/byteutils + +import ../merkletree + +func spongeDigest*( + _: type Poseidon2Hash, + bytes: openArray[byte], + rate: static int = 2): ?!Poseidon2Hash = + ## Hashes chunks of data with a sponge of rate 1 or 2. + ## + + success Sponge.digest(bytes, rate) + +func spongeDigest*( + _: type Poseidon2Hash, + bytes: openArray[Bn254Fr], + rate: static int = 2): ?!Poseidon2Hash = + ## Hashes chunks of elements with a sponge of rate 1 or 2. + ## + + success Sponge.digest(bytes, rate) + +func digestTree*( + _: type Poseidon2Tree, + bytes: openArray[byte], + chunkSize: int): ?!Poseidon2Tree = + ## Hashes chunks of data with a sponge of rate 2, and combines the + ## resulting chunk hashes in a merkle root. 
+ ## + + # doAssert not(rate == 1 or rate == 2), "rate can only be 1 or 2" + + if not chunkSize > 0: + return failure("chunkSize must be greater than 0") + + var index = 0 + var leaves: seq[Poseidon2Hash] + while index < bytes.len: + let start = index + let finish = min(index + chunkSize, bytes.len) + let digest = ? Poseidon2Hash.spongeDigest(bytes.toOpenArray(start, finish - 1), 2) + leaves.add(digest) + index += chunkSize + return Poseidon2Tree.init(leaves) + +func digest*( + _: type Poseidon2Tree, + bytes: openArray[byte], + chunkSize: int): ?!Poseidon2Hash = + ## Hashes chunks of data with a sponge of rate 2, and combines the + ## resulting chunk hashes in a merkle root. + ## + + (? Poseidon2Tree.digestTree(bytes, chunkSize)).root + +func digestMhash*( + _: type Poseidon2Tree, + bytes: openArray[byte], + chunkSize: int): ?!MultiHash = + ## Hashes chunks of data with a sponge of rate 2 and + ## returns the multihash of the root + ## + + let + hash = ? Poseidon2Tree.digest(bytes, chunkSize) + + ? 
MultiHash.init(Pos2Bn128MrklCodec, hash).mapFailure diff --git a/codex/utils/statemachine.nim b/codex/utils/statemachine.nim deleted file mode 100644 index 9b7a4309..00000000 --- a/codex/utils/statemachine.nim +++ /dev/null @@ -1,102 +0,0 @@ -import pkg/questionable -import pkg/chronos -import ./optionalcast - -## Implementation of the the state pattern: -## https://en.wikipedia.org/wiki/State_pattern -## -## Define your own state machine and state types: -## -## type -## Light = ref object of StateMachine -## color: string -## LightState = ref object of State -## -## let light = Light(color: "yellow") -## -## Define the states: -## -## type -## On = ref object of LightState -## Off = ref object of LightState -## -## Perform actions on state entry and exit: -## -## method enter(state: On) = -## echo light.color, " light switched on" -## -## method exit(state: On) = -## echo light.color, " light no longer switched on" -## -## light.switch(On()) # prints: 'light switched on' -## light.switch(Off()) # prints: 'light no longer switched on' -## -## Allow behaviour to change based on the current state: -## -## method description*(state: LightState): string {.base.} = -## return "a light" -## -## method description*(state: On): string = -## if light =? (state.context as Light): -## return "a " & light.color & " light" -## -## method description*(state: Off): string = -## return "a dark light" -## -## proc description*(light: Light): string = -## if state =? 
(light.state as LightState): -## return state.description -## -## light.switch(On()) -## echo light.description # prints: 'a yellow light' -## light.switch(Off()) -## echo light.description # prints 'a dark light' - - -export questionable -export optionalcast - -type - StateMachine* = ref object of RootObj - state: ?State - State* = ref object of RootObj - context: ?StateMachine - -method enter(state: State) {.base.} = - discard - -method exit(state: State) {.base.} = - discard - -func state*(machine: StateMachine): ?State = - machine.state - -func context*(state: State): ?StateMachine = - state.context - -proc switch*(machine: StateMachine, newState: State) = - if state =? machine.state: - state.exit() - state.context = StateMachine.none - machine.state = newState.some - newState.context = machine.some - newState.enter() - -proc switch*(oldState, newState: State) = - if context =? oldState.context: - context.switch(newState) - -type - AsyncState* = ref object of State - -method enterAsync(state: AsyncState) {.base, async.} = - discard - -method exitAsync(state: AsyncState) {.base, async.} = - discard - -method enter(state: AsyncState) = - asyncSpawn state.enterAsync() - -method exit(state: AsyncState) = - asyncSpawn state.exitAsync() diff --git a/codex/utils/stintutils.nim b/codex/utils/stintutils.nim new file mode 100644 index 00000000..125ff8b6 --- /dev/null +++ b/codex/utils/stintutils.nim @@ -0,0 +1,4 @@ +import pkg/stint + +func fromDecimal*(T: typedesc[StUint|StInt], s: string): T {.inline.} = + parse(s, type result, radix = 10) diff --git a/codex/utils/then.nim b/codex/utils/then.nim new file mode 100644 index 00000000..fbcf7bf3 --- /dev/null +++ b/codex/utils/then.nim @@ -0,0 +1,207 @@ +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/upraises + +# Similar to JavaScript's Promise API, `.then` and `.catch` can be used to +# handle results and errors of async `Futures` within a synchronous closure. 
+# They can be used as an alternative to `asyncSpawn` which does not return a +# value and will raise a `FutureDefect` if there are unhandled errors +# encountered. Both `.then` and `.catch` act as callbacks that do not block the +# synchronous closure's flow. + +# `.then` is called when the `Future` is successfully completed and can be +# chained as many times as desired, calling each `.then` callback in order. When +# the `Future` returns `Result[T, ref CatchableError]` (or `?!T`), the value +# called in the `.then` callback will be unpacked from the `Result` as a +# convenience. In other words, for `Future[?!T]`, the `.then` callback will take +# a single parameter `T`. See `tests/utils/testthen.nim` for more examples. To +# allow for chaining, `.then` returns its future. If the future is already +# complete, the `.then` callback will be executed immediately. + +# `.catch` is called when the `Future` fails. In the case when the `Future` +# returns a `Result[T, ref CatchableError` (or `?!T`), `.catch` will be called +# if the `Result` contains an error. If the `Future` is already failed (or +# `Future[?!T]` contains an error), the `.catch` callback will be executed +# immediately. + +# `.cancelled` is called when the `Future` is cancelled. If the `Future` is +# already cancelled, the `.cancelled` callback will be executed immediately. 
+ +# More info on JavaScript's Promise API can be found at: +# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise + +runnableExamples: + proc asyncProc(): Future[int] {.async.} = + await sleepAsync(1.millis) + return 1 + + asyncProc() + .then(proc(i: int) = echo "returned ", i) + .catch(proc(e: ref CatchableError) = doAssert false, "will not be triggered") + + # outputs "returned 1" + + proc asyncProcWithError(): Future[int] {.async.} = + await sleepAsync(1.millis) + raise newException(ValueError, "some error") + + asyncProcWithError() + .then(proc(i: int) = doAssert false, "will not be triggered") + .catch(proc(e: ref CatchableError) = echo "errored: ", e.msg) + + # outputs "errored: some error" + +type + OnSuccess*[T] = proc(val: T) {.gcsafe, upraises: [].} + OnError* = proc(err: ref CatchableError) {.gcsafe, upraises: [].} + OnCancelled* = proc() {.gcsafe, upraises: [].} + +proc ignoreError(err: ref CatchableError) = discard +proc ignoreCancelled() = discard + +template handleFinished(future: FutureBase, + onError: OnError, + onCancelled: OnCancelled) = + + if not future.finished: + return + + if future.cancelled: + onCancelled() + return + + if future.failed: + onError(future.error) + return + +proc then*(future: Future[void], onSuccess: OnSuccess[void]): Future[void] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, ignoreCancelled) + onSuccess() + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*[T](future: Future[T], onSuccess: OnSuccess[T]): Future[T] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, ignoreCancelled) + + if val =? 
future.read.catch: + onSuccess(val) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*[T](future: Future[?!T], onSuccess: OnSuccess[T]): Future[?!T] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, ignoreCancelled) + + try: + if val =? future.read: + onSuccess(val) + except CatchableError as e: + ignoreError(e) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*(future: Future[?!void], onSuccess: OnSuccess[void]): Future[?!void] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, ignoreCancelled) + + try: + if future.read.isOk: + onSuccess() + except CatchableError as e: + ignoreError(e) + return + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc catch*[T](future: Future[T], onError: OnError) = + + if future.isNil: return + + proc cb(udata: pointer) = + future.handleFinished(onError, ignoreCancelled) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + +proc catch*[T](future: Future[?!T], onError: OnError) = + + if future.isNil: return + + proc cb(udata: pointer) = + future.handleFinished(onError, ignoreCancelled) + + try: + if err =? 
future.read.errorOption: + onError(err) + except CatchableError as e: + onError(e) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + +proc cancelled*[T](future: Future[T], onCancelled: OnCancelled): Future[T] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, onCancelled) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + onCancelled() + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc cancelled*[T](future: Future[?!T], onCancelled: OnCancelled): Future[?!T] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, onCancelled) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + onCancelled() + + future.addCallback(cb) + future.cancelCallback = cancellation + return future diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim new file mode 100644 index 00000000..9361d07b --- /dev/null +++ b/codex/utils/timer.nim @@ -0,0 +1,55 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +## Timer +## Used to execute a callback in a loop + +import pkg/upraises + +push: {.upraises: [].} + +import pkg/chronos + +import ../logutils + +type + TimerCallback* = proc(): Future[void] {.gcsafe, upraises:[].} + Timer* = ref object of RootObj + callback: TimerCallback + interval: Duration + name: string + loopFuture: Future[void] + +proc new*(T: type Timer, timerName = "Unnamed Timer"): Timer = + ## Create a new Timer intance with the given name + Timer(name: timerName) + +proc timerLoop(timer: Timer) {.async.} = + try: + while true: + await timer.callback() + await sleepAsync(timer.interval) + except CancelledError: + raise + except CatchableError as exc: + error "Timer caught unhandled exception: ", name=timer.name, msg=exc.msg + +method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.base.} = + if timer.loopFuture != nil: + return + trace "Timer starting: ", name=timer.name + timer.callback = callback + timer.interval = interval + timer.loopFuture = timerLoop(timer) + +method stop*(timer: Timer) {.async, base.} = + if timer.loopFuture != nil: + trace "Timer stopping: ", name=timer.name + await timer.loopFuture.cancelAndWait() + timer.loopFuture = nil diff --git a/codex/utils/trackedfutures.nim b/codex/utils/trackedfutures.nim new file mode 100644 index 00000000..f3fcdb2d --- /dev/null +++ b/codex/utils/trackedfutures.nim @@ -0,0 +1,52 @@ +import std/sugar +import std/tables +import pkg/chronos + +import ../logutils +import ../utils/then + +type + TrackedFutures* = ref object + futures: Table[uint, FutureBase] + cancelling: bool + +logScope: + topics = "trackable futures" + +proc len*(self: TrackedFutures): int = self.futures.len + +proc removeFuture(self: TrackedFutures, future: FutureBase) = + if not self.cancelling and not future.isNil: + self.futures.del(future.id) + +proc track*[T](self: TrackedFutures, fut: Future[T]): Future[T] = + if self.cancelling: + return fut + + self.futures[fut.id] = FutureBase(fut) + + fut + .then((val: 
T) => self.removeFuture(fut)) + .cancelled(() => self.removeFuture(fut)) + .catch((e: ref CatchableError) => self.removeFuture(fut)) + + return fut + +proc track*[T, U](future: Future[T], self: U): Future[T] = + ## Convenience method that allows chaining future, eg: + ## `await someFut().track(sales)`, where `sales` has declared a + ## `trackedFutures` property. + self.trackedFutures.track(future) + +proc cancelTracked*(self: TrackedFutures) {.async.} = + self.cancelling = true + + trace "cancelling tracked futures" + + for future in self.futures.values: + if not future.isNil and not future.finished: + trace "cancelling tracked future", id = future.id + await future.cancelAndWait() + + self.futures.clear() + self.cancelling = false diff --git a/codex/validation.nim b/codex/validation.nim new file mode 100644 index 00000000..011c6737 --- /dev/null +++ b/codex/validation.nim @@ -0,0 +1,113 @@ +import std/sets +import std/sequtils +import pkg/chronos +import ./market +import ./clock +import ./logutils + +export market +export sets + +type + Validation* = ref object + slots: HashSet[SlotId] + maxSlots: int + clock: Clock + market: Market + subscriptions: seq[Subscription] + running: Future[void] + periodicity: Periodicity + proofTimeout: UInt256 + +logScope: + topics = "codex validator" + +proc new*( + _: type Validation, + clock: Clock, + market: Market, + maxSlots: int +): Validation = + ## Create a new Validation instance + Validation(clock: clock, market: market, maxSlots: maxSlots) + +proc slots*(validation: Validation): seq[SlotId] = + validation.slots.toSeq + +proc getCurrentPeriod(validation: Validation): UInt256 = + return validation.periodicity.periodOf(validation.clock.now().u256) + +proc waitUntilNextPeriod(validation: Validation) {.async.} = + let period = validation.getCurrentPeriod() + let periodEnd = validation.periodicity.periodEnd(period) + trace "Waiting until next period", currentPeriod = period + await 
validation.clock.waitUntil(periodEnd.truncate(int64) + 1) + +proc subscribeSlotFilled(validation: Validation) {.async.} = + proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + let slotId = slotId(requestId, slotIndex) + if slotId notin validation.slots: + if validation.slots.len < validation.maxSlots: + trace "Adding slot", slotId + validation.slots.incl(slotId) + let subscription = await validation.market.subscribeSlotFilled(onSlotFilled) + validation.subscriptions.add(subscription) + +proc removeSlotsThatHaveEnded(validation: Validation) {.async.} = + var ended: HashSet[SlotId] + let slots = validation.slots + for slotId in slots: + let state = await validation.market.slotState(slotId) + if state != SlotState.Filled: + trace "Removing slot", slotId + ended.incl(slotId) + validation.slots.excl(ended) + +proc markProofAsMissing(validation: Validation, + slotId: SlotId, + period: Period) {.async.} = + logScope: + currentPeriod = validation.getCurrentPeriod() + + try: + if await validation.market.canProofBeMarkedAsMissing(slotId, period): + trace "Marking proof as missing", slotId, periodProofMissed = period + await validation.market.markProofAsMissing(slotId, period) + else: + let inDowntime {.used.} = await validation.market.inDowntime(slotId) + trace "Proof not missing", checkedPeriod = period, inDowntime + except CancelledError: + raise + except CatchableError as e: + error "Marking proof as missing failed", msg = e.msg + +proc markProofsAsMissing(validation: Validation) {.async.} = + let slots = validation.slots + for slotId in slots: + let previousPeriod = validation.getCurrentPeriod() - 1 + await validation.markProofAsMissing(slotId, previousPeriod) + +proc run(validation: Validation) {.async.} = + trace "Validation started" + try: + while true: + await validation.waitUntilNextPeriod() + await validation.removeSlotsThatHaveEnded() + await validation.markProofsAsMissing() + except CancelledError: + trace "Validation stopped" + discard + except 
CatchableError as e: + error "Validation failed", msg = e.msg + +proc start*(validation: Validation) {.async.} = + validation.periodicity = await validation.market.periodicity() + validation.proofTimeout = await validation.market.proofTimeout() + await validation.subscribeSlotFilled() + validation.running = validation.run() + +proc stop*(validation: Validation) {.async.} = + await validation.running.cancelAndWait() + while validation.subscriptions.len > 0: + let subscription = validation.subscriptions.pop() + await subscription.unsubscribe() diff --git a/config.nims b/config.nims index 0523845d..b64aacbd 100644 --- a/config.nims +++ b/config.nims @@ -1,11 +1,23 @@ -import std/os -if defined(release): +include "build.nims" + +import std/os +const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)] + +when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and + # BEWARE + # In Nim 1.6, config files are evaluated with a working directory + # matching where the Nim command was invocated. This means that we + # must do all file existence checks with full absolute paths: + system.fileExists(currentDir & "nimbus-build-system.paths"): + include "nimbus-build-system.paths" + +when defined(release): switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName")) else: switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName")) -if defined(limitStackUsage): +when defined(limitStackUsage): # This limits stack usage of each individual function to 1MB - the option is # available on some GCC versions but not all - run with `-d:limitStackUsage` # and look for .su files in "./build/", "./nimcache/" or $TMPDIR that list the @@ -13,7 +25,7 @@ if defined(limitStackUsage): switch("passC", "-fstack-usage -Werror=stack-usage=1048576") switch("passL", "-fstack-usage -Werror=stack-usage=1048576") -if defined(windows): +when defined(windows): # https://github.com/nim-lang/Nim/pull/19891 switch("define", "nimRawSetjmp") @@ -37,8 +49,8 
@@ if defined(windows): # engineering a more portable binary release, this should be tweaked but still # use at least -msse2 or -msse3. -if defined(disableMarchNative): - if defined(i386) or defined(amd64): +when defined(disableMarchNative): + when defined(i386) or defined(amd64): switch("passC", "-mssse3") elif defined(macosx) and defined(arm64): # Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758 @@ -59,6 +71,20 @@ else: --define:metrics # for heap-usage-by-instance-type metrics and object base-type strings --define:nimTypeNames +--styleCheck:usages +--styleCheck:error +--maxLoopIterationsVM:1000000000 +--fieldChecks:on +--warningAsError:"ProveField:on" + +when (NimMajor, NimMinor) >= (1, 4): + --warning:"ObservableStores:off" + --warning:"LockLevel:off" + --hint:"XCannotRaiseY:off" +when (NimMajor, NimMinor) >= (1, 6): + --warning:"DotLikeOps:off" +when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11): + --warning:"BareExcept:off" switch("define", "withoutPCRE") @@ -72,8 +98,6 @@ if not defined(macosx): --define:nimStackTraceOverride switch("import", "libbacktrace") ---define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9 - # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" switch("warning", "CaseTransition:off") @@ -88,7 +112,17 @@ switch("define", "libp2p_pki_schemes=secp256k1") #TODO this infects everything in this folder, ideally it would only # apply to codex.nim, but since codex.nims is used for other purpose # we can't use it. 
And codex.cfg doesn't work -switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic]") +switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dynamic]") + +# Workaround for assembler incompatibility between constantine and secp256k1 +switch("define", "use_asm_syntax_intel=false") +switch("define", "ctt_asm=false") + +# Allow the use of old-style case objects for nim config compatibility +switch("define", "nimOldCaseObjects") + +# Enable compat mode for Chronos V4 +switch("define", "chronosHandleException") # begin Nimble config (version 1) when system.fileExists("nimble.paths"): diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..21356698 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,57 @@ +# Codex Docker Image + + Codex provides pre-built docker images and they are stored in the [codexstorage/nim-codex](https://hub.docker.com/repository/docker/codexstorage/nim-codex) repository. + + +## Run + + We can run Codex Docker image using CLI + ```shell + # Default run + docker run --rm codexstorage/nim-codex + + # Mount local datadir + docker run -v ./datadir:/datadir --rm codexstorage/nim-codex codex --data-dir=/datadir + ``` + + And Docker Compose + ```shell + # Run in detached mode + docker-compose up -d + ``` + + +## Arguments + + Docker image is based on the [codex.Dockerfile](codex.Dockerfile) and there is + ``` + ENTRYPOINT ["/docker-entrypoint.sh"] + CMD ["codex"] + ``` + + It means that at the image run it will just run `codex` application without any arguments and we can pass them as a regular arguments, by overriding command + ```shell + docker run codexstorage/nim-codex codex --api-bindaddr=0.0.0.0 --api-port=8080 + ``` + + +## Environment variables + + We can configure Codex using [Environment variables](../README#environment-variables) and [docker-compose.yaml](docker-compose.yaml) file can be useful as an example. 
+ + We also added a temporary environment variable `NAT_IP_AUTO` to the entrypoint which is set as `false` for releases and ` true` for regular builds. That approach is useful for Dist-Tests. + ```shell + # Disable NAT_IP_AUTO for regular builds + docker run -e NAT_IP_AUTO=false codexstorage/nim-codex + ``` + + +## Slim + 1. Build the image using `docker build -t codexstorage/codexsetup:latest -f codex.Dockerfile ..` + 2. The docker image can then be minified using [slim](https://github.com/slimtoolkit/slim). Install slim on your path and then run: + ```shell + slim # brings up interactive prompt + >>> build --target status-im/codexsetup --http-probe-off true + ``` + 3. This should output an image with name `status-im/codexsetup.slim` + 4. We can then bring up the image using `docker-compose up -d`. diff --git a/docker/codex.Dockerfile b/docker/codex.Dockerfile new file mode 100644 index 00000000..f3ffb92e --- /dev/null +++ b/docker/codex.Dockerfile @@ -0,0 +1,44 @@ +# Variables +ARG BUILDER=ubuntu:24.04 +ARG IMAGE=${BUILDER} +ARG RUST_VERSION=${RUST_VERSION:-1.78.0} +ARG BUILD_HOME=/src +ARG MAKE_PARALLEL=${MAKE_PARALLEL:-4} +ARG NIMFLAGS="${NIMFLAGS:-"-d:disableMarchNative"}" +ARG APP_HOME=/codex +ARG NAT_IP_AUTO=${NAT_IP_AUTO:-false} + +# Build +FROM ${BUILDER} AS builder +ARG RUST_VERSION +ARG BUILD_HOME +ARG MAKE_PARALLEL +ARG NIMFLAGS + +RUN apt-get update && apt-get install -y git cmake curl make bash lcov build-essential +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${RUST_VERSION} -y + +SHELL ["/bin/bash", "-c"] +ENV BASH_ENV="/etc/bash_env" +RUN echo "export PATH=$PATH:$HOME/.cargo/bin" >> $BASH_ENV + +WORKDIR ${BUILD_HOME} +COPY . . 
+RUN make clean +RUN make -j ${MAKE_PARALLEL} update +RUN make -j ${MAKE_PARALLEL} + +# Create +FROM ${IMAGE} +ARG BUILD_HOME +ARG APP_HOME +ARG NAT_IP_AUTO + +WORKDIR ${APP_HOME} +COPY --from=builder ${BUILD_HOME}/build/codex /usr/local/bin +COPY --from=builder ${BUILD_HOME}/openapi.yaml . +COPY --from=builder --chmod=0755 ${BUILD_HOME}/docker/docker-entrypoint.sh / +RUN apt-get update && apt-get install -y libgomp1 bash curl jq && rm -rf /var/lib/apt/lists/* +ENV NAT_IP_AUTO=${NAT_IP_AUTO} +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["codex"] diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml new file mode 100644 index 00000000..f2d76d23 --- /dev/null +++ b/docker/docker-compose.yaml @@ -0,0 +1,45 @@ +services: + codex-node1: + image: codexstorage/nim-codex:sha-82b0399 + environment: + - CODEX_LOG_LEVEL=${CODEX_LOG_LEVEL:-TRACE} + - CODEX_METRICS=${CODEX_METRICS:-false} + - CODEX_METRICS_ADDRESS=${CODEX_METRICS_ADDRESS:-0.0.0.0} + - CODEX_METRICS_PORT=${CODEX_METRICS_PORT:-8008} + - CODEX_DATA_DIR=${CODEX_DATA_DIR:-/datadir} + - CODEX_LISTEN_ADDRS=${CODEX_LISTEN_ADDRS:-/ip4/0.0.0.0/tcp/2345} + - CODEX_NAT=${CODEX_NAT:-10.0.0.10} + - CODEX_DISC_IP=${CODEX_DISC_IP:-0.0.0.0} + - CODEX_DISC_PORT=${CODEX_DISC_PORT:-8090} + - CODEX_NET_PRIVKEY=${CODEX_NET_PRIVKEY:-key} + # - CODEX_BOOTSTRAP_NODE=${CODEX_BOOTSTRAP_NODE} + - CODEX_MAX_PEERS=${CODEX_MAX_PEERS:-160} + - CODEX_AGENT_STRING=${CODEX_AGENT_STRING:-Codex} + - CODEX_API_BINDADDR=${CODEX_API_BINDADDR:-0.0.0.0} + - CODEX_API_PORT=${CODEX_API_PORT:-8080} + - CODEX_REPO_KIND=${CODEX_REPO_KIND:-fs} + - CODEX_STORAGE_QUOTA=${CODEX_STORAGE_QUOTA:-8589934592} + - CODEX_BLOCK_TTL=${CODEX_BLOCK_TTL:-0} + # - CODEX_BLOCK_MI=${CODEX_BLOCK_MI} + - CODEX_BLOCK_MN=${CODEX_BLOCK_MN:-1000} + - CODEX_CACHE_SIZE=${CODEX_CACHE_SIZE:-0} + - CODEX_PERSISTENCE=${CODEX_PERSISTENCE:-false} + - CODEX_ETH_PROVIDER=${CODEX_ETH_PROVIDER:-ws://localhost:8545} + # - CODEX_ETH_ACCOUNT=${CODEX_ETH_ACCOUNT} + # - 
CODEX_MARKETPLACE_ADDRESS=${CODEX_MARKETPLACE_ADDRESS:-0x59b670e9fA9D0A427751Af201D676719a970857b} + - CODEX_VALIDATOR=${CODEX_VALIDATOR:-false} + - CODEX_VALIDATOR_MAX_SLOTS=${CODEX_VALIDATOR_MAX_SLOTS:-1000} + - NAT_IP_AUTO=false + - NAT_PUBLIC_IP_AUTO=https://ipinfo.io/ip + ports: + - 8080:8080/tcp # REST API + - 8008:8008/tcp # Metrics + - 2345:2345/tcp # libp2p + - 8090:8090/udp # DHT discovery + volumes: + - ./datadir:/datadir:z + networks: + - codex +networks: + codex: + driver: bridge diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh new file mode 100644 index 00000000..9875d9ae --- /dev/null +++ b/docker/docker-entrypoint.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Environment variables from files +if [[ -n "${ENV_PATH}" ]]; then + set -a + [[ -f "${ENV_PATH}" ]] && source "${ENV_PATH}" || for f in "${ENV_PATH}"/*; do source "$f"; done + set +a +fi + +# Parameters +if [[ -z "${CODEX_NAT}" ]]; then + if [[ "${NAT_IP_AUTO}" == "true" && -z "${NAT_PUBLIC_IP_AUTO}" ]]; then + export CODEX_NAT=$(hostname --ip-address) + echo "Private: CODEX_NAT=${CODEX_NAT}" + elif [[ -n "${NAT_PUBLIC_IP_AUTO}" ]]; then + # Run for 60 seconds if fail + WAIT=120 + SECONDS=0 + SLEEP=5 + while (( SECONDS < WAIT )); do + export CODEX_NAT=$(curl -s -f -m 5 "${NAT_PUBLIC_IP_AUTO}") + # Check if exit code is 0 and returned value is not empty + if [[ $? 
-eq 0 && -n "${CODEX_NAT}" ]]; then + echo "Public: CODEX_NAT=${CODEX_NAT}" + break + else + # Sleep and check again + echo "Can't get Public IP - Retry in $SLEEP seconds / $((WAIT - SECONDS))" + sleep $SLEEP + fi + done + fi +fi + +# Stop Codex run if can't get NAT IP when requested +if [[ "${NAT_IP_AUTO}" == "true" && -z "${CODEX_NAT}" ]]; then + echo "Can't get Private IP - Stop Codex run" + exit 1 +elif [[ -n "${NAT_PUBLIC_IP_AUTO}" && -z "${CODEX_NAT}" ]]; then + echo "Can't get Public IP in $WAIT seconds - Stop Codex run" + exit 1 +fi + +# If marketplace is enabled from the testing environment, +# The file has to be written before Codex starts. +if [ -n "${PRIV_KEY}" ]; then + echo ${PRIV_KEY} > "private.key" + chmod 600 "private.key" + export CODEX_ETH_PRIVATE_KEY="private.key" + echo "Private key set" +fi + +# Run +echo "Run Codex node" +exec "$@" diff --git a/docs/DownloadFlow.md b/docs/DownloadFlow.md new file mode 100644 index 00000000..040897eb --- /dev/null +++ b/docs/DownloadFlow.md @@ -0,0 +1,68 @@ +# Download Flow +Sequence of interactions that result in dat blocks being transferred across the network. + +## Local Store +When data is available in the local blockstore, + +```mermaid +sequenceDiagram +actor Alice +participant API +Alice->>API: Download(CID) +API->>+Node/StoreStream: Retrieve(CID) +loop Get manifest block, then data blocks + Node/StoreStream->>NetworkStore: GetBlock(CID) + NetworkStore->>LocalStore: GetBlock(CID) + LocalStore->>NetworkStore: Block + NetworkStore->>Node/StoreStream: Block +end +Node/StoreStream->>Node/StoreStream: Handle erasure coding +Node/StoreStream->>-API: Data stream +API->>Alice: Stream download of block +``` + +## Network Store +When data is not found ih the local blockstore, the block-exchange engine is used to discover the location of the block within the network. Connection will be established to the node(s) that have the block, and exchange can take place. 
+ +```mermaid +sequenceDiagram +box +actor Alice +participant API +participant Node/StoreStream +participant NetworkStore +participant Discovery +participant Engine +end +box +participant OtherNode +end +Alice->>API: Download(CID) +API->>+Node/StoreStream: Retrieve(CID) +Node/StoreStream->>-API: Data stream +API->>Alice: Download stream begins +loop Get manifest block, then data blocks + Node/StoreStream->>NetworkStore: GetBlock(CID) + NetworkStore->>Engine: RequestBlock(CID) + opt CID not known + Engine->>Discovery: Discovery Block + Discovery->>Discovery: Locates peers who provide block + Discovery->>Engine: Peers + Engine->>Engine: Update peers admin + end + Engine->>Engine: Select optimal peer + Engine->>OtherNode: Send WantHave list + OtherNode->>Engine: Send BlockPresence + Engine->>Engine: Update peers admin + Engine->>Engine: Decide to buy block + Engine->>OtherNode: Send WantBlock list + OtherNode->>Engine: Send Block + Engine->>NetworkStore: Block + NetworkStore->>NetworkStore: Add to Local store + NetworkStore->>Node/StoreStream: Resolve Block + Node/StoreStream->>Node/StoreStream: Handle erasure coding + Node/StoreStream->>API: Push data to stream +end +API->>Alice: Download stream finishes +``` + diff --git a/docs/Marketplace.md b/docs/Marketplace.md new file mode 100644 index 00000000..53712b0c --- /dev/null +++ b/docs/Marketplace.md @@ -0,0 +1,444 @@ +# Running a Local Codex Network with Marketplace Support + +This tutorial will teach you how to run a small Codex network with the _storage marketplace_ enabled; i.e., the functionality in Codex which allows participants to offer and buy storage in a market, ensuring that storage providers honor their part of the deal by means of cryptographic proofs. + +To complete this tutorial, you will need: + +* the [geth](https://github.com/ethereum/go-ethereum) Ethereum client; +* a Codex binary, which [you can compile from source](https://github.com/codex-storage/nim-codex?tab=readme-ov-file#build-and-run). 
+ +We will also be using [bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) syntax throughout. If you use a different shell, you may need to adapt things to your platform. + +In this tutorial, you will: + +1. [Set Up a Geth PoA network](#1-set-up-a-geth-poa-network); +2. [Set up The Marketplace](#2-set-up-the-marketplace); +3. [Run Codex](#3-run-codex); +4. [Buy and Sell Storage in the Marketplace](#4-buy-and-sell-storage-on-the-marketplace). + +We strongly suggest you to create a folder (e.g. `marketplace-tutorial`), and switch into it before beginning. + +## 1. Set Up a Geth PoA Network + +For this tutorial, we will use a simple [Proof-of-Authority](https://github.com/ethereum/EIPs/issues/225) network with geth. The first step is creating a _signer account_: an account which will be used by geth to sign the blocks in the network. Any block signed by a signer is accepted as valid. + +### 1.1. Create a Signer Account + +To create a signer account, run: + +```bash +geth account new --datadir geth-data +``` + +The account generator will ask you to input a password, which you can leave blank. It will then print some information, including the account's public address: + +```bash +INFO [03-22|12:58:05.637] Maximum peer count ETH=50 total=50 +INFO [03-22|12:58:05.638] Smartcard socket not found, disabling err="stat /run/pcscd/pcscd.comm: no such file or directory" +Your new account is locked with a password. Please give a password. Do not forget this password. +Password: +Repeat password: + +Your new key was generated + +Public address of the key: 0x93976895c4939d99837C8e0E1779787718EF8368 +... +``` + +In this example, the public address of the signer account is `0x93976895c4939d99837C8e0E1779787718EF8368`. Yours will print a different address. Save it for later usage. 
+ +Next set an environment variable for later usage: + +```sh +export GETH_SIGNER_ADDR="0x0000000000000000000000000000000000000000" +echo ${GETH_SIGNER_ADDR} > geth_signer_address.txt +``` + +### 1.2. Configure The Network and Create the Genesis Block + +The next step is telling geth what kind of network you want to run. We will be running a [pre-merge](https://ethereum.org/en/roadmap/merge/) network with Proof-of-Authority consensus. To get that working, create a `network.json` file. + +If you set the GETH_SIGNER_ADDR variable above you can run to create the `network.json` file: + +```sh +echo "{\"config\": { \"chainId\": 12345, \"homesteadBlock\": 0, \"eip150Block\": 0, \"eip155Block\": 0, \"eip158Block\": 0, \"byzantiumBlock\": 0, \"constantinopleBlock\": 0, \"petersburgBlock\": 0, \"istanbulBlock\": 0, \"berlinBlock\": 0, \"londonBlock\": 0, \"arrowGlacierBlock\": 0, \"grayGlacierBlock\": 0, \"clique\": { \"period\": 1, \"epoch\": 30000 } }, \"difficulty\": \"1\", \"gasLimit\": \"8000000\", \"extradata\": \"0x0000000000000000000000000000000000000000000000000000000000000000${GETH_SIGNER_ADDR:2}0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\", \"alloc\": { \"${GETH_SIGNER_ADDR}\": { \"balance\": \"10000000000000000000000\"}}}" > network.json +``` + +You can also manually create the file with the following content modified with your signer private key: + +```json +{ + "config": { + "chainId": 12345, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "clique": { + "period": 1, + "epoch": 30000 + } + }, + "difficulty": "1", + "gasLimit": "8000000", + "extradata": 
"0x000000000000000000000000000000000000000000000000000000000000000093976895c4939d99837C8e0E1779787718EF83680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "alloc": { + "0x93976895c4939d99837C8e0E1779787718EF8368": { + "balance": "10000000000000000000000" + } + } +} +``` + +Note that the signer account address is embedded in two different places: +* inside of the `"extradata"` string, surrounded by zeroes and stripped of its `0x` prefix; +* as an entry key in the `alloc` session. +Make sure to replace that ID with the account ID that you wrote down in Step 1.1. + + +Once `network.json` is created, you can initialize the network with: + +```bash +geth init --datadir geth-data network.json +``` + +### 1.3. Start your PoA Node + +We are now ready to start our $1$-node, private blockchain. To launch the signer node, open a separate terminal on the same working directory and run: + +```bash +geth\ + --datadir geth-data\ + --networkid 12345\ + --unlock ${GETH_SIGNER_ADDR}\ + --nat extip:127.0.0.1\ + --netrestrict 127.0.0.0/24\ + --mine\ + --miner.etherbase ${GETH_SIGNER_ADDR}\ + --http\ + --allow-insecure-unlock +``` + +Note that, once again, the signer account created in Step 1.1 appears both in `--unlock` and `--allow-insecure-unlock`. Make sure you have the `GETH_SIGNER_ADDR` set. + +Geth will prompt you to insert the account's password as it starts up. Once you do that, it should be able to start up and begin "mining" blocks. + +## 2. Set Up The Marketplace + +You will need to open new terminal for this section and geth needs to be running already. Setting up the Codex marketplace entails: + +1. Deploying the Codex Marketplace contracts to our private blockchain +2. Setup Ethereum accounts we will use to buy and sell storage in the Codex marketplace +3. Provisioning those accounts with the required token balances + +### 2.1. 
Deploy the Codex Marketplace Contracts + +To deploy the contracts, start by cloning the Codex contracts repository locally and installing its dependencies: + +```bash +git clone https://github.com/codex-storage/codex-contracts-eth +cd codex-contracts-eth +npm install +``` +You now must **wait until $256$ blocks are mined in your PoA network**, or deploy will fail. This should take about $4$ minutes and $30$ seconds. You can check which block height you are currently at by running: + +```bash +geth attach --exec web3.eth.blockNumber ../geth-data/geth.ipc +``` + +once that gets past $256$, you are ready to go. To deploy contracts, run: + +```bash +export DISTTEST_NETWORK_URL=http://localhost:8545 # bootstrap node +npx hardhat --network codexdisttestnetwork deploy && cd ../ +``` + +If the command completes successfully, you are ready to prepare the accounts. + +### 2.2. Generate the Required Accounts + +We will run $2$ Codex nodes: a **storage provider**, which will sell storage on the network, and a **client**, which will buy and use such storage; we therefore need two valid Ethereum accounts. We could create random accounts by using one of the many tools available to that end but, since this is a tutorial running on a local private network, we will simply provide you with two pre-made accounts along with their private keys which you can copy and paste instead: + +First make sure you're back in the `marketplace-tutorial` folder and not the `codex-contracts-eth` subfolder. 
Then set these variables: + +**Storage:** +```sh +export ETH_STORAGE_ADDR=0x45BC5ca0fbdD9F920Edd12B90908448C30F32a37 +export ETH_STORAGE_PK=0x06c7ac11d4ee1d0ccb53811b71802fa92d40a5a174afad9f2cb44f93498322c3 +echo $ETH_STORAGE_PK > storage.pkey && chmod 0600 storage.pkey +``` + +**Client:** +```sh +export ETH_CLIENT_ADDR=0x9F0C62Fe60b22301751d6cDe1175526b9280b965 +export ETH_CLIENT_PK=0x5538ec03c956cb9d0bee02a25b600b0225f1347da4071d0fd70c521fdc63c2fc +echo $ETH_CLIENT_PK > client.pkey && chmod 0600 client.pkey +``` + +### 2.3. Provision Accounts with Tokens + +We now need to transfer some ETH to each of the accounts, as well as provide them with some Codex tokens for the storage node to use as collateral and for the client node to buy actual storage. + +Although the process is not particularly complicated, I suggest you use [the script we prepared](https://github.com/gmega/local-codex-bare/blob/main/scripts/mint-tokens.js) for that. This script, essentially: + +1. reads the Marketplace contract address and its ABI from the deployment data; +2. transfers $1$ ETH from the signer account to a target account if the target account has no ETH balance; +3. mints $n$ Codex tokens and adds it into the target account's balance. + +To use the script, just download it into a local file named `mint-tokens.js`, for instance using curl: + +```bash +# set the contract file location +export CONTRACT_DEPLOY_FULL="codex-contracts-eth/deployments/codexdisttestnetwork" +export GETH_SIGNER_ADDR=$(cat geth_signer_address.txt) +# download script +curl https://raw.githubusercontent.com/gmega/codex-local-bare/main/scripts/mint-tokens.js -o mint-tokens.js +``` + +```bash +# Installs Web3-js +npm install web3 +# Provides tokens to the storage account. +node ./mint-tokens.js $CONTRACT_DEPLOY_FULL/TestToken.json $GETH_SIGNER_ADDR 0x45BC5ca0fbdD9F920Edd12B90908448C30F32a37 10000000000 +# Provides tokens to the client account. 
+node ./mint-tokens.js $CONTRACT_DEPLOY_FULL/TestToken.json $GETH_SIGNER_ADDR 0x9F0C62Fe60b22301751d6cDe1175526b9280b965 10000000000 +``` + +If you get a message like `Usage: mint-tokens.js ` then you need to ensure you have + +## 3. Run Codex + +With accounts and geth in place, we can now start the Codex nodes. + +### 3.1. Storage Node + +The storage node will be the one storing data and submitting the proofs of storage to the chain. To do that, it needs access to: + +1. the address of the Marketplace contract that has been deployed to the local geth node in [Step 2.1](#21-deploy-the-codex-marketplace-contracts); +2. the sample ceremony files which are shipped in the Codex contracts repo. + +Recall you have clone the `codex-contracts-eth` repository in Step 2.1. All of the required files are in there. + +**Address of the Marketplace Contract.** The contract address can be found inside of the file `codex-contracts-eth/deployments/codexdisttestnetwork/Marketplace.json`: + +```bash +grep '"address":' ${CONTRACT_DEPLOY_FULL}/Marketplace.json +``` + +which should print something like: +```sh + "address": "0x8891732D890f5A7B7181fBc70F7482DE28a7B60f", +``` + +Then run the following with the correct market place address: +```sh +export MARKETPLACE_ADDRESS="0x0000000000000000000000000000000000000000" +echo ${MARKETPLACE_ADDRESS} > marketplace_address.txt +``` + +**Prover ceremony files.** The ceremony files are under the `codex-contracts-eth/verifier/networks/codexdisttestnetwork` subdirectory. There are three of them: `proof_main.r1cs`, `proof_main.zkey`, and `prooof_main.wasm`. We will need all of them to start the Codex storage node. + +**Starting the storage node.** Let: + +* `PROVER_ASSETS` contain the directory where the prover ceremony files are located. **This must be an absolute path**; +* `CODEX_BINARY` contain the location of your Codex binary; +* `MARKETPLACE_ADDRESS` contain the address of the Marketplace contract (obtained above). 
+ +Set these paths into environment variables (modify it with the correct paths if you changed them above): + +```sh +export CONTRACT_DEPLOY_FULL=$(realpath "codex-contracts-eth/deployments/codexdisttestnetwork") +export PROVER_ASSETS=$(realpath "codex-contracts-eth/verifier/networks/codexdisttestnetwork/") +export CODEX_BINARY=$(realpath "../build/codex") +export MARKETPLACE_ADDRESS=$(cat marketplace_address.txt) +``` + +To launch the storage node, run: + +```bash +${CODEX_BINARY}\ + --data-dir=./codex-storage\ + --listen-addrs=/ip4/0.0.0.0/tcp/8080\ + --api-port=8000\ + --disc-port=8090\ + persistence\ + --eth-provider=http://localhost:8545\ + --eth-private-key=./storage.pkey\ + --marketplace-address=${MARKETPLACE_ADDRESS}\ + --validator\ + --validator-max-slots=1000\ + prover\ + --circom-r1cs=${PROVER_ASSETS}/proof_main.r1cs\ + --circom-wasm=${PROVER_ASSETS}/proof_main.wasm\ + --circom-zkey=${PROVER_ASSETS}/proof_main.zkey +``` + +**Starting the client node.** + +The client node is started similarly except that: + +* we need to pass the SPR of the storage node so it can form a network with it; +* since it does not run any proofs, it does not require any ceremony files. + +We get the Signed Peer Record (SPR) of the storage node so we can bootstrap the client node with it. To get the SPR, issue the following call: + +```bash +curl -H 'Accept: text/plain' 'http://localhost:8000/api/codex/v1/spr' +``` + +You should get the SPR back starting with `spr:`. 
Next set these paths into environment variables: + +```bash +# set the SPR for the storage node +export STORAGE_NODE_SPR=$(curl -H 'Accept: text/plain' 'http://localhost:8000/api/codex/v1/spr') +# basic vars +export CONTRACT_DEPLOY_FULL=$(realpath "codex-contracts-eth/deployments/codexdisttestnetwork") +export PROVER_ASSETS=$(realpath "codex-contracts-eth/verifier/networks/codexdisttestnetwork/") +export CODEX_BINARY=$(realpath "../build/codex") +export MARKETPLACE_ADDRESS=$(cat marketplace_address.txt) +``` + +```bash +${CODEX_BINARY}\ + --data-dir=./codex-client\ + --listen-addrs=/ip4/0.0.0.0/tcp/8081\ + --api-port=8001\ + --disc-port=8091\ + --bootstrap-node=${STORAGE_NODE_SPR}\ + persistence\ + --eth-provider=http://localhost:8545\ + --eth-private-key=./client.pkey\ + --marketplace-address=${MARKETPLACE_ADDRESS} +``` + +## 4. Buy and Sell Storage on the Marketplace + +Any storage negotiation has two sides: a buyer and a seller. Before we can actually request storage, therefore, we must first put some of it for sale. + +### 4.1 Sell Storage + +The following request will cause the storage node to put out $50\text{MB}$ of storage for sale for $1$ hour, at a price of $1$ Codex token per byte per second, while expressing that it's willing to take at most a $1000$ Codex token penalty for not fulfilling its part of the contract.[^1] + +```bash +curl 'http://localhost:8000/api/codex/v1/sales/availability' \ + --header 'Content-Type: application/json' \ + --data '{ + "totalSize": "50000000", + "duration": "3600", + "minPrice": "1", + "maxCollateral": "1000" +}' +``` + +This should return a response with an id a string (e.g. `"id": "0x552ef12a2ee64ca22b237335c7e1df884df36d22bfd6506b356936bc718565d4"`) which identifies this storage offer. To check the current storage offers for this node, you can issue: + +```bash +curl 'http://localhost:8000/api/codex/v1/sales/availability' +``` + +This should print a list of offers, with the one you just created figuring among them. 
+
+### 4.2. Buy Storage
+
+Before we can buy storage, we must have some actual data to request storage for. Start by uploading a small file to your client node. On Linux you could, for instance, use `dd` to generate a $100KB$ file:
+
+```bash
+dd if=/dev/urandom of=./data.bin bs=100K count=1
+```
+
+but any small file will do. Assuming your file is named `data.bin`, you can upload it with:
+
+```bash
+curl "http://localhost:8001/api/codex/v1/data" --data-binary @data.bin
+```
+
+Once the upload completes, you should see a CID (e.g. `zDvZRwzm2mK7tvDzKScRLapqGdgNTLyyEBvx1TQY37J2CdWdS6Sj`) for the file printed to the terminal. Use that CID in the purchase request:
+
+```bash
+export CID=zDvZRwzm2mK7tvDzKScRLapqGdgNTLyyEBvx1TQY37J2CdWdS6Sj
+export EXPIRY_TIME=$((1000 + $(date +%s))) # current time + 1000 seconds
+ # adjust expiry_time as desired, see below
+```
+
+```bash
+curl "http://localhost:8001/api/codex/v1/storage/request/${CID}" \
+  --header 'Content-Type: application/json' \
+  --data "{
+    \"duration\": \"1200\",
+    \"reward\": \"1\",
+    \"proofProbability\": \"3\",
+    \"expiry\": \"${EXPIRY_TIME}\",
+    \"nodes\": 3,
+    \"tolerance\": 1,
+    \"collateral\": \"1000\"
+  }"
+```
+
+The parameters under `--data` say that:
+
+1. we want to purchase storage for our file for $20$ minutes (`"duration": "1200"`);
+2. we are willing to pay up to $1$ token per byte, per second (`"reward": "1"`);
+3. our file will be split into four pieces (`"nodes": 3` and `"tolerance": 1`), so that we only need three pieces to rebuild the file; i.e., we can tolerate that at most one node stops storing our data; either due to failure or other reasons;
+4. we demand `1000` tokens in collateral from storage providers for each piece. Since there are $4$ such pieces, there will be `4000` in total collateral committed by all of the storage providers taken together once our request is fulfilled.
+
+Finally, the `expiry` puts a cap on the block time at which our request expires. 
This has to be at most `current block time + duration`, which means this request can fail if you input the wrong number, which you likely will if you do not know what the current block time is. Fear not, however, as you can try an arbitrary number (e.g. `1000`), and look at the failure message:
+
+ `Expiry needs to be in future. Now: 1711995463`
+
+to compute a valid one. Just take the number in the error message and add the duration; i.e., `1711995463 + 1200 = 1711996663`, then use the resulting number (`1711996663`) as expiry and things should work. The request should return a purchase ID (e.g. `1d0ec5261e3364f8b9d1cf70324d70af21a9b5dccba380b24eb68b4762249185`), which you can use to track the completion of your request in the marketplace.
+
+### 4.3. Track your Storage Requests
+
+POSTing a storage request will make it available in the storage market, and a storage node will eventually pick it up.
+
+You can poll the status of your request by means of:
+```bash
+export STORAGE_PURCHASE_ID="1d0ec5261e3364f8b9d1cf70324d70af21a9b5dccba380b24eb68b4762249185"
+curl "http://localhost:8001/api/codex/v1/storage/purchases/${STORAGE_PURCHASE_ID}"
+```
+
+For instance:
+
+```bash
+> curl 'http://localhost:8001/api/codex/v1/storage/purchases/6c698cd0ad71c41982f83097d6fa75beb582924e08a658357a1cd4d7a2a6766d'
+```
+
+This returns a result like:
+
+```json
+{
+  "requestId": "0x6c698cd0ad71c41982f83097d6fa75beb582924e08a658357a1cd4d7a2a6766d",
+  "request": {
+    "client": "0xed6c3c20358f0217919a30c98d72e29ceffedc33",
+    "ask": {
+      "slots": 3,
+      "slotSize": "262144",
+      "duration": "1000",
+      "proofProbability": "3",
+      "reward": "1",
+      "collateral": "1",
+      "maxSlotLoss": 1
+    },
+    "content": {
+      "cid": "zDvZRwzm3nnkekFLCACmWyKdkYixsX3j9gJhkvFtfYA5K9bpXQnC"
+    },
+    "expiry": "1711992852",
+    "nonce": "0x9f5e651ecd3bf73c914f8ed0b1088869c64095c0d7bd50a38fc92ebf66ff5915",
+    "id": "0x6c698cd0ad71c41982f83097d6fa75beb582924e08a658357a1cd4d7a2a6766d"
+  },
+  "state": "submitted",
+  "error": null
+} +``` + +Shows that a request has been submitted but has not yet been filled. Your request will be successful once `"state"` shows `"started"`. Anything other than that means the request has not been completely processed yet, and an `"error"` state other than `null` means it failed. + +[^1]: Codex files get partitioned into pieces called "slots" and distributed to various storage providers. The collateral refers to one such slot, and will be slowly eaten away as the storage provider fails to deliver timely proofs, but the actual logic is [more involved than that](https://github.com/codex-storage/codex-contracts-eth/blob/6c9f797f408608958714024b9055fcc330e3842f/contracts/Marketplace.sol#L209). diff --git a/docs/TwoClientTest.md b/docs/TwoClientTest.md new file mode 100644 index 00000000..4859247c --- /dev/null +++ b/docs/TwoClientTest.md @@ -0,0 +1,176 @@ +# Codex Two-Client Test + +The two-client test is a manual test you can perform to check your setup and familiarize yourself with the Codex API. These steps will guide you through running and connecting two nodes, in order to upload a file to one and then download that file from the other. This test also includes running a local blockchain node in order to have the Marketplace functionality available. However, running a local blockchain node is not strictly necessary, and you can skip steps marked as optional if you choose not start a local blockchain node. + +## Prerequisite + +Make sure you have built the client, and can run it as explained in the [README](../README.md). + +## Steps + +### 0. Setup blockchain node (optional) + +You need to have installed NodeJS and npm in order to spinup a local blockchain node. + +Go to directory `vendor/codex-contracts-eth` and run these two commands: +``` +npm ci +npm start +``` + +This will launch a local Ganache blockchain. + +### 1. 
Launch Node #1
+
+Open a terminal and run:
+- Mac/Linux: `"build/codex" --data-dir="$(pwd)/Data1" --listen-addrs="/ip4/127.0.0.1/tcp/8070" --api-port=8080 --disc-port=8090`
+- Windows: `"build/codex.exe" --data-dir="Data1" --listen-addrs="/ip4/127.0.0.1/tcp/8070" --api-port=8080 --disc-port=8090`
+
+Optionally, if you want to use the Marketplace blockchain functionality, you need to also include these flags: `--persistence --eth-account=<account>`, where `<account>` can be one of the following:
+
+ - `0x70997970C51812dc3A010C7d01b50e0d17dc79C8`
+ - `0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC`
+ - `0x90F79bf6EB2c4f870365E785982E1f101E93b906`
+ - `0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65`
+
+**For each node use a different account!**
+
+| Argument       | Description                                                           |
+|----------------|-----------------------------------------------------------------------|
+| `data-dir`     | We specify a relative path where the node will store its data.        |
+| `listen-addrs` | Multiaddress where the node will accept connections from other nodes. |
+| `api-port`     | Port on localhost where the node will expose its API.                 |
+| `disc-port`    | Port the node will use for its discovery service.                     |
+| `persistence`  | Enables Marketplace functionality. Requires a blockchain connection.  |
+| `eth-account`  | Defines which blockchain account the node should use.                 |
+
+Codex uses sane defaults for most of its arguments. Here we specify some explicitly for the purpose of this walk-through.
+
+### 2. Sign of life
+
+Run the command:
+
+```bash
+curl -X GET http://127.0.0.1:8080/api/codex/v1/debug/info
+```
+
+This GET request will return the node's debug information. 
The response will be in JSON and should look like:
+
+```json
+{
+  "id": "16Uiu2HAmJ3TSfPnrJNedHy2DMsjTqwBiVAQQqPo579DuMgGxmG99",
+  "addrs": [
+    "/ip4/127.0.0.1/tcp/8070"
+  ],
+  "repo": "/Users/user/projects/nim-codex/Data1",
+  "spr": "spr:CiUIAhIhA1AL2J7EWfg7x77iOrR9YYBisY6CDtU2nEhuwDaQyjpkEgIDARo8CicAJQgCEiEDUAvYnsRZ-DvHvuI6tH1hgGKxjoIO1TacSG7ANpDKOmQQ2MWasAYaCwoJBH8AAAGRAh-aKkYwRAIgB2ooPfAyzWEJDe8hD2OXKOBnyTOPakc4GzqKqjM2OGoCICraQLPWf0oSEuvmSroFebVQx-3SDtMqDoIyWhjq1XFF",
+  "announceAddresses": [
+    "/ip4/127.0.0.1/tcp/8070"
+  ],
+  "table": {
+    "localNode": {
+      "nodeId": "f6e6d48fa7cd171688249a57de0c1aba15e88308c07538c91e1310c9f48c860a",
+      "peerId": "16Uiu2HAmJ3TSfPnrJNedHy2DMsjTqwBiVAQQqPo579DuMgGxmG99",
+      "record": "...",
+      "address": "0.0.0.0:8090",
+      "seen": false
+    },
+    "nodes": []
+  },
+  "codex": {
+    "version": "untagged build",
+    "revision": "b3e626a5"
+  }
+}
+```
+
+| Field   | Description                                                                              |
+| ------- | ---------------------------------------------------------------------------------------- |
+| `id`    | Id of the node. Also referred to as 'peerId'.                                            |
+| `addrs` | Multiaddresses currently open to accept connections from other nodes.                    |
+| `repo`  | Path of this node's data folder.                                                         |
+| `spr`   | Signed Peer Record, encoded information about this node and its location in the network. |
+| `announceAddresses` | Multiaddresses used for announcing this node |
+| `table` | Table of nodes present in the node's DHT |
+| `codex` | Codex version information |
+
+### 3. Launch Node #2
+
+We will need the signed peer record (SPR) from the first node that you got in the previous step.
+
+Replace `` in the following command with the SPR returned from the previous command. (Note that it should include the `spr:` at the beginning.)
+ +Open a new terminal and run: +- Mac/Linux: `"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=` +- Windows: `"build/codex.exe" --data-dir="Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=` + +Alternatively on Mac, Linux, or MSYS2 and a recent Codex binary you can run it in one command like: + +```sh +"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=$(curl -H "Accept: text/plain" http://127.0.0.1:8080/api/codex/v1/spr) +``` + +Notice we're using a new data-dir, and we've increased each port number by one. This is needed so that the new node won't try to open ports already in use by the first node. + +We're now also including the `bootstrap-node` argument. This allows us to link the new node to another one, bootstrapping our own little peer-to-peer network. (SPR strings always start with "spr:".) + +### 4. Connect The Two + +Normally the two nodes will automatically connect. If they do not automatically connect or you want to manually connect nodes you can use the peerId to connect nodes. 
+
+You can get the first node's peer id by running the following command and finding the `"peerId"` in the results:
+
+```bash
+curl -X GET -H "Accept: text/plain" http://127.0.0.1:8081/api/codex/v1/debug/info
+```
+
+Next replace `<PEER ID>` in the following command with the peerId returned from the previous command:
+
+```bash
+curl -X GET http://127.0.0.1:8080/api/codex/v1/connect/<PEER ID>?addrs=/ip4/127.0.0.1/tcp/8071
+```
+
+Alternatively on Mac, Linux, or MSYS2 and a recent Codex binary you can run it in one command like:
+
+```bash
+curl -X GET http://127.0.0.1:8080/api/codex/v1/connect/$(curl -X GET -H "Accept: text/plain" http://127.0.0.1:8081/api/codex/v1/peerid)\?addrs=/ip4/127.0.0.1/tcp/8071
+```
+
+Notice that we are sending the peerId and the multiaddress of node 2 to the `/connect` endpoint of node 1. This provides node 1 all the information it needs to communicate with node 2. The response to this request should be `Successfully connected to peer`.
+
+### 5. Upload The File
+
+We're now ready to upload a file to the network. In this example we'll use node 1 for uploading and node 2 for downloading. But the reverse also works.
+
+Next replace `<FILE PATH>` with the path to the file you want to upload in the following command:
+
+```bash
+ curl -H "Content-Type: application/octet-stream" -H "Expect: 100-continue" -T "<FILE PATH>" 127.0.0.1:8080/api/codex/v1/data -X POST
+```
+
+(Hint: if curl is reluctant to show you the response, add `-o <FILENAME>` to write the result to a file.)
+
+Depending on the file size this may take a moment. Codex is processing the file by cutting it into blocks and generating erasure-recovery data. When the process is finished, the request will return the content-identifier (CID) of the uploaded file. It should look something like `zdj7WVxH8HHHenKtid8Vkgv5Z5eSUbCxxr8xguTUBMCBD8F2S`.
+
+### 6. Download The File
+
+Replace `<CID>` with the identifier returned in the previous step. Replace `<OUTPUT FILE>` with the filename where you want to store the downloaded file.
+ +```bash + curl 127.0.0.1:8081/api/codex/v1/data//network --output + ``` + +Notice we are connecting to the second node in order to download the file. The CID we provide contains the information needed to locate the file within the network. + +### 7. Verify The Results + +If your file is downloaded and identical to the file you uploaded, then this manual test has passed. Rejoice! If on the other hand that didn't happen or you were unable to complete any of these steps, please leave us a message detailing your troubles. + +## Notes + +When using the Ganache blockchain, there are some deviations from the expected behavior, mainly linked to how blocks are mined, which affects certain functionalities in the Sales module. +Therefore, if you are manually testing processes such as payout collection after a request is finished or proof submissions, you need to mine some blocks manually for it to work correctly. You can do this by using the following curl command: + +```bash +$ curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"evm_mine","params":[],"id":67}' 127.0.0.1:8545 +``` diff --git a/nim.cfg b/nim.cfg deleted file mode 100644 index ec563332..00000000 --- a/nim.cfg +++ /dev/null @@ -1,3 +0,0 @@ --d:"chronicles_log_level=INFO" ---warning:LockLevel:off ---warning:ObservableStores:off diff --git a/nimble.lock b/nimble.lock deleted file mode 100644 index d001256f..00000000 --- a/nimble.lock +++ /dev/null @@ -1,481 +0,0 @@ -{ - "version": 1, - "packages": { - "stew": { - "version": "0.1.0", - "vcsRevision": "6ad35b876fb6ebe0dfee0f697af173acc47906ee", - "url": "https://github.com/status-im/nim-stew.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "46d58c4feb457f3241e3347778334e325dce5268" - } - }, - "unittest2": { - "version": "0.0.4", - "vcsRevision": "f180f596c88dfd266f746ed6f8dbebce39c824db", - "url": "https://github.com/status-im/nim-unittest2.git", - "downloadMethod": "git", - "dependencies": [], - 
"checksums": { - "sha1": "fa309c41eaf6ef57895b9e603f2620a2f6e11780" - } - }, - "httputils": { - "version": "0.3.0", - "vcsRevision": "689da19e9e9cfff4ced85e2b25c6b2b5598ed079", - "url": "https://github.com/status-im/nim-http-utils.git", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "4ad3ad68d13c50184180ab4b2eacc0bd7ed2ed44" - } - }, - "nimcrypto": { - "version": "0.5.4", - "vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00", - "url": "https://github.com/cheatfate/nimcrypto.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08" - } - }, - "questionable": { - "version": "0.10.2", - "vcsRevision": "6018fd43e033d5a5310faa45bcaa1b44049469a4", - "url": "https://github.com/status-im/questionable.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "36a6c012637c7736a390e74a7f94667bca562073" - } - }, - "upraises": { - "version": "0.1.0", - "vcsRevision": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2", - "url": "https://github.com/markspanbroek/upraises.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "a0243c8039e12d547dbb2e9c73789c16bb8bc956" - } - }, - "secp256k1": { - "version": "0.5.2", - "vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f", - "url": "https://github.com/status-im/nim-secp256k1.git", - "downloadMethod": "git", - "dependencies": [ - "stew", - "nimcrypto" - ], - "checksums": { - "sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e" - } - }, - "stint": { - "version": "0.0.1", - "vcsRevision": "036c71d06a6b22f8f967ba9d54afd2189c3872ca", - "url": "https://github.com/status-im/stint.git", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "0f187a2115315ca898e5f9a30c5e506cf6057062" - } - }, - "contractabi": { - "version": "0.4.4", - "vcsRevision": "b111c27b619fc1d81fb1c6942372824a18a71960", - "url": "https://github.com/status-im/nim-contract-abi", - 
"downloadMethod": "git", - "dependencies": [ - "stint", - "stew", - "nimcrypto", - "questionable", - "upraises" - ], - "checksums": { - "sha1": "3ed10b11eec8fe14a81e4e58dbc41f88fd6ddf7a" - } - }, - "nitro": { - "version": "0.5.1", - "vcsRevision": "6b4c455bf4dad7449c1580055733a1738fcd5aec", - "url": "https://github.com/status-im/nim-nitro.git", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "questionable", - "upraises", - "contractabi", - "secp256k1", - "stint", - "stew" - ], - "checksums": { - "sha1": "19d90deaeb84b19214dc2aab28a466f0bc4a7e2e" - } - }, - "bearssl": { - "version": "0.1.5", - "vcsRevision": "32e125015ae4251675763842366380795a91b722", - "url": "https://github.com/status-im/nim-bearssl.git", - "downloadMethod": "git", - "dependencies": [ - "unittest2" - ], - "checksums": { - "sha1": "c58a61e71c49ed7c7fe7608df40d60945f7c4bad" - } - }, - "chronos": { - "version": "3.0.11", - "vcsRevision": "17fed89c99beac5a92d3668d0d3e9b0e4ac13936", - "url": "https://github.com/status-im/nim-chronos.git", - "downloadMethod": "git", - "dependencies": [ - "stew", - "bearssl", - "httputils", - "unittest2" - ], - "checksums": { - "sha1": "f6fffc87571e5f76af2a77c4ebcc0e00909ced4e" - } - }, - "testutils": { - "version": "0.4.2", - "vcsRevision": "aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2", - "url": "https://github.com/status-im/nim-testutils", - "downloadMethod": "git", - "dependencies": [ - "unittest2" - ], - "checksums": { - "sha1": "94427e0cce0e0c5841edcd3a6530b4e6b857a3cb" - } - }, - "faststreams": { - "version": "0.3.0", - "vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09", - "url": "https://github.com/status-im/nim-faststreams.git", - "downloadMethod": "git", - "dependencies": [ - "stew", - "testutils", - "chronos", - "unittest2" - ], - "checksums": { - "sha1": "97edf9797924af48566a0af8267203dc21d80c77" - } - }, - "serialization": { - "version": "0.1.0", - "vcsRevision": "fcd0eadadde0ee000a63df8ab21dc4e9f015a790", - "url": 
"https://github.com/status-im/nim-serialization.git", - "downloadMethod": "git", - "dependencies": [ - "faststreams", - "unittest2", - "stew" - ], - "checksums": { - "sha1": "fef59519892cac70cccd81b612085caaa5e3e6cf" - } - }, - "json_serialization": { - "version": "0.1.0", - "vcsRevision": "c5f0e2465e8375dfc7aa0f56ccef67cb680bc6b0", - "url": "https://github.com/status-im/nim-json-serialization.git", - "downloadMethod": "git", - "dependencies": [ - "serialization", - "stew" - ], - "checksums": { - "sha1": "d89d79d0679a3a41b350e3ad4be56c0308cc5ec6" - } - }, - "chronicles": { - "version": "0.10.2", - "vcsRevision": "1682096306ddba8185dcfac360a8c3f952d721e4", - "url": "https://github.com/status-im/nim-chronicles.git", - "downloadMethod": "git", - "dependencies": [ - "testutils", - "json_serialization" - ], - "checksums": { - "sha1": "9a5bebb76b0f7d587a31e621d260119279e91c76" - } - }, - "presto": { - "version": "0.0.4", - "vcsRevision": "962bb588d19c7180e39f0d9f18131e75861bab20", - "url": "https://github.com/status-im/nim-presto.git", - "downloadMethod": "git", - "dependencies": [ - "chronos", - "chronicles", - "stew" - ], - "checksums": { - "sha1": "8d3e77d7ddf14606504fe86c430b1b5712aada92" - } - }, - "zlib": { - "version": "0.1.0", - "vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2", - "url": "https://github.com/status-im/nim-zlib", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c" - } - }, - "libbacktrace": { - "version": "0.0.8", - "vcsRevision": "ce966b1c469dda179b54346feaaf1a62202c984f", - "url": "https://github.com/status-im/nim-libbacktrace", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "ba7a2f3d21db894ace7bb4ebe0a5b06af995d68b" - } - }, - "dnsclient": { - "version": "0.1.2", - "vcsRevision": "fbb76f8af8a33ab818184a7d4406d9fee20993be", - "url": "https://github.com/ba0f3/dnsclient.nim.git", - "downloadMethod": "git", - "dependencies": [], - 
"checksums": { - "sha1": "663239a914c814204b30dda6e0902cc0fbd0b8ee" - } - }, - "metrics": { - "version": "0.0.1", - "vcsRevision": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5", - "url": "https://github.com/status-im/nim-metrics.git", - "downloadMethod": "git", - "dependencies": [ - "chronos" - ], - "checksums": { - "sha1": "6274c7ae424b871bc21ca3a6b6713971ff6a8095" - } - }, - "asynctest": { - "version": "0.3.1", - "vcsRevision": "5347c59b4b057443a014722aa40800cd8bb95c69", - "url": "https://github.com/status-im/asynctest.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "53e0b610d13700296755a4ebe789882cae47a3b9" - } - }, - "websock": { - "version": "0.1.0", - "vcsRevision": "8a433c6ba43940b13ce56f83d79a93273ece5684", - "url": "https://github.com/status-im/nim-websock.git", - "downloadMethod": "git", - "dependencies": [ - "chronos", - "httputils", - "chronicles", - "stew", - "nimcrypto", - "bearssl", - "zlib" - ], - "checksums": { - "sha1": "1dbb4e1dd8c525c5674dca42b8eb25bdeb2f76b3" - } - }, - "libp2p": { - "version": "0.0.2", - "vcsRevision": "eeb3c210a37408716b6a8b45f578adf87610cef2", - "url": "https://github.com/status-im/nim-libp2p.git", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "dnsclient", - "bearssl", - "chronicles", - "chronos", - "metrics", - "secp256k1", - "stew", - "websock" - ], - "checksums": { - "sha1": "e9e9b93e6e425e47df1eea01a8e9efeac6e0fc97" - } - }, - "combparser": { - "version": "0.2.0", - "vcsRevision": "ba4464c005d7617c008e2ed2ebc1ba52feb469c6", - "url": "https://github.com/PMunch/combparser.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "a3635260961a893b88f69aac19f1b24e032a7e97" - } - }, - "protobuf_serialization": { - "version": "0.2.0", - "vcsRevision": "f7d671f877e01213494aac7903421ccdbe70616f", - "url": "https://github.com/status-im/nim-protobuf-serialization.git", - "downloadMethod": "git", - "dependencies": [ - "stew", - "faststreams", - 
"serialization", - "combparser" - ], - "checksums": { - "sha1": "9418459027d0d5eb30a974649dc615a76e8e4aca" - } - }, - "confutils": { - "version": "0.1.0", - "vcsRevision": "0435e67832b6bb8dfdf0ddb102903e9d820206d2", - "url": "https://github.com/status-im/nim-confutils.git", - "downloadMethod": "git", - "dependencies": [ - "stew" - ], - "checksums": { - "sha1": "1edab14b434aca6ae28e2385982fa60d623c600a" - } - }, - "news": { - "version": "0.5", - "vcsRevision": "e79420e835489132aaa412f993b565f5dd6295f4", - "url": "https://github.com/status-im/news", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "a5f1789bf650822156712fd3bdec1bf6ab4ac42e" - } - }, - "json_rpc": { - "version": "0.0.2", - "vcsRevision": "5a281760803907f4989cacf109b516381dfbbe11", - "url": "https://github.com/status-im/nim-json-rpc", - "downloadMethod": "git", - "dependencies": [ - "stew", - "nimcrypto", - "stint", - "chronos", - "httputils", - "chronicles", - "news", - "websock", - "json_serialization" - ], - "checksums": { - "sha1": "3ec28a4c9e5dcd3210e85dfcfdd0a6baf46eccbe" - } - }, - "ethers": { - "version": "0.1.7", - "vcsRevision": "270d358b869d02a4c625dde971f799db336670fb", - "url": "https://github.com/status-im/nim-ethers", - "downloadMethod": "git", - "dependencies": [ - "chronos", - "contractabi", - "questionable", - "upraises", - "json_rpc", - "stint", - "stew" - ], - "checksums": { - "sha1": "3eb78a87744d5894595f33a36b21348a59d8f1a5" - } - }, - "libp2pdht": { - "version": "0.0.1", - "vcsRevision": "9a872518d621bf8b390f88cd65617bca6aca1d2d", - "url": "https://github.com/status-im/nim-libp2p-dht.git", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "bearssl", - "chronicles", - "chronos", - "libp2p", - "metrics", - "protobuf_serialization", - "secp256k1", - "stew", - "stint", - "asynctest" - ], - "checksums": { - "sha1": "d97e8b751e11ccc7e059b79fb1a046d2b0d0e872" - } - }, - "lrucache": { - "version": "1.2.1", - "vcsRevision": 
"8767ade0b76ea5b5d4ce24a52d0c58a6ebeb66cd", - "url": "https://github.com/status-im/lrucache.nim", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "2c4365d10029d6f6a8b92a712e9002ac3886b07d" - } - }, - "leopard": { - "version": "0.0.1", - "vcsRevision": "2a6a63923e9b95676b5ae7ff2c346be0e63e753c", - "url": "https://github.com/status-im/nim-leopard", - "downloadMethod": "git", - "dependencies": [ - "stew", - "unittest2", - "upraises" - ], - "checksums": { - "sha1": "e71db348018eab26f3059e1c03bf3088c5109cfe" - } - }, - "taskpools": { - "version": "0.0.3", - "vcsRevision": "8d408ac6cfc9c24ec8b7b65d5993e85050dcbaa9", - "url": "https://github.com/status-im/nim-taskpools.git", - "downloadMethod": "git", - "dependencies": [], - "checksums": { - "sha1": "37bbbbb03d9b893af6980592624211ab057392c0" - } - }, - "blscurve": { - "version": "0.0.1", - "vcsRevision": "0237e4e0e914fc19359c18a66406d33bc942775c", - "url": "https://github.com/status-im/nim-blscurve", - "downloadMethod": "git", - "dependencies": [ - "nimcrypto", - "stew", - "taskpools" - ], - "checksums": { - "sha1": "65f58854ffd2098e0d0ca08f6ea0efb3c27529e0" - } - } - } -} diff --git a/openapi.yaml b/openapi.yaml new file mode 100644 index 00000000..94450bf3 --- /dev/null +++ b/openapi.yaml @@ -0,0 +1,773 @@ +openapi: 3.0.3 + +info: + version: 0.0.1 + title: Codex API + description: "List of endpoints and interfaces available to Codex API users" + +security: + - { } + +components: + schemas: + MultiAddress: + type: string + description: Address of node as specified by the multi-address specification https://multiformats.io/multiaddr/ + example: /ip4/127.0.0.1/tcp/8080 + + PeerId: + type: string + description: Peer Identity reference as specified at https://docs.libp2p.io/concepts/fundamentals/peers/ + example: QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N + + Id: + type: string + description: 32bits identifier encoded in hex-decimal string. + example: 0x... 
+
+    BigInt:
+      type: string
+      description: Integer represented as decimal string
+
+    Cid:
+      type: string
+      description: Content Identifier as specified at https://github.com/multiformats/cid
+      example: QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
+
+    SlotId:
+      type: string
+      description: Keccak hash of the abi encoded tuple (RequestId, slot index)
+      example: 268a781e0db3f7cf36b18e5f4fdb7f586ec9edd08e5500b17c0e518a769f114a
+
+    LogLevel:
+      type: string
+      description: "One of the log levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL"
+      example: DEBUG
+
+    EthereumAddress:
+      type: string
+      description: An Ethereum address
+
+    Reward:
+      type: string
+      description: The maximum amount of tokens paid per second per slot to hosts the client is willing to pay
+
+    Duration:
+      type: string
+      description: The duration of the request in seconds as decimal string
+
+    ProofProbability:
+      type: string
+      description: How often storage proofs are required as decimal string
+
+    Expiry:
+      type: string
+      description: A timestamp as seconds since unix epoch at which this request expires if the Request does not find requested amount of nodes to host the data.
+ default: 10 minutes + + SPR: + type: string + description: Signed Peer Record (libp2p) + + SPRRead: + type: object + properties: + spr: + $ref: "#/components/schemas/SPR" + + PeerIdRead: + type: object + properties: + id: + $ref: "#/components/schemas/PeerId" + + ErasureParameters: + type: object + properties: + totalChunks: + type: integer + + PoRParameters: + description: Parameters for Proof of Retrievability + type: object + properties: + u: + type: string + publicKey: + type: string + name: + type: string + + Content: + type: object + description: Parameters specifying the content + properties: + cid: + $ref: "#/components/schemas/Cid" + erasure: + $ref: "#/components/schemas/ErasureParameters" + por: + $ref: "#/components/schemas/PoRParameters" + + DebugInfo: + type: object + properties: + id: + $ref: "#/components/schemas/PeerId" + addrs: + type: array + items: + $ref: "#/components/schemas/MultiAddress" + repo: + type: string + description: Path of the data repository where all nodes data are stored + spr: + $ref: "#/components/schemas/SPR" + + SalesAvailability: + type: object + properties: + id: + $ref: "#/components/schemas/Id" + totalSize: + type: string + description: Total size of availability's storage in bytes as decimal string + duration: + $ref: "#/components/schemas/Duration" + minPrice: + type: string + description: Minimum price to be paid (in amount of tokens) as decimal string + maxCollateral: + type: string + description: Maximum collateral user is willing to pay per filled Slot (in amount of tokens) as decimal string + + SalesAvailabilityREAD: + allOf: + - $ref: "#/components/schemas/SalesAvailability" + - type: object + properties: + freeSize: + type: string + description: Unused size of availability's storage in bytes as decimal string + + SalesAvailabilityCREATE: + allOf: + - $ref: "#/components/schemas/SalesAvailability" + - required: + - totalSize + - minPrice + - maxCollateral + - duration + + Slot: + type: object + properties: + 
id: + $ref: "#/components/schemas/SlotId" + request: + $ref: "#/components/schemas/StorageRequest" + slotIndex: + type: string + description: Slot Index as hexadecimal string + + Reservation: + type: object + properties: + id: + $ref: "#/components/schemas/Id" + availabilityId: + $ref: "#/components/schemas/Id" + size: + $ref: "#/components/schemas/BigInt" + requestId: + $ref: "#/components/schemas/Id" + slotIndex: + type: string + description: Slot Index as hexadecimal string + + StorageRequestCreation: + type: object + required: + - reward + - duration + - proofProbability + - collateral + - expiry + properties: + duration: + $ref: "#/components/schemas/Duration" + reward: + $ref: "#/components/schemas/Reward" + proofProbability: + $ref: "#/components/schemas/ProofProbability" + nodes: + description: Minimal number of nodes the content should be stored on + type: integer + default: 1 + tolerance: + description: Additional number of nodes on top of the `nodes` property that can be lost before pronouncing the content lost + type: integer + default: 0 + collateral: + type: string + description: Number as decimal string that represents how much collateral is asked from hosts that wants to fill a slots + expiry: + type: string + description: Number as decimal string that represents expiry threshold in seconds from when the Request is submitted. When the threshold is reached and the Request does not find requested amount of nodes to host the data, the Request is voided. The number of seconds can not be higher then the Request's duration itself. + StorageAsk: + type: object + required: + - reward + properties: + slots: + description: Number of slots (eq. 
hosts) that the Request want to have the content spread over + type: integer + slotSize: + type: string + description: Amount of storage per slot (in bytes) as decimal string + duration: + $ref: "#/components/schemas/Duration" + proofProbability: + $ref: "#/components/schemas/ProofProbability" + reward: + $ref: "#/components/schemas/Reward" + maxSlotLoss: + type: integer + description: Max slots that can be lost without data considered to be lost + + StorageRequest: + type: object + properties: + id: + type: string + description: Request ID + client: + $ref: "#/components/schemas/EthereumAddress" + ask: + $ref: "#/components/schemas/StorageAsk" + content: + $ref: "#/components/schemas/Content" + expiry: + $ref: "#/components/schemas/Expiry" + nonce: + type: string + description: Random data + + Purchase: + type: object + properties: + state: + type: string + description: Description of the Request's state + error: + type: string + description: If Request failed, then here is presented the error message + request: + $ref: "#/components/schemas/StorageRequest" + + DataList: + type: object + properties: + content: + type: array + items: + $ref: "#/components/schemas/DataItem" + + DataItem: + type: object + properties: + cid: + $ref: "#/components/schemas/Cid" + manifest: + $ref: "#/components/schemas/ManifestItem" + + ManifestItem: + type: object + properties: + rootHash: + $ref: "#/components/schemas/Cid" + description: "Root hash of the content" + originalBytes: + type: integer + format: int64 + description: "Length of original content in bytes" + blockSize: + type: integer + description: "Size of blocks" + protected: + type: boolean + description: "Indicates if content is protected by erasure-coding" + + Space: + type: object + properties: + totalBlocks: + description: "Number of blocks stored by the node" + type: integer + format: int64 + quotaMaxBytes: + type: integer + format: int64 + description: "Maximum storage space used by the node" + quotaUsedBytes: + 
type: integer + format: int64 + description: "Amount of storage space currently in use" + quotaReservedBytes: + type: integer + format: int64 + description: "Amount of storage space reserved" + +servers: + - url: "http://localhost:8080/api/codex/v1" + +tags: + - name: Marketplace + description: Marketplace information and operations + - name: Data + description: Data operations + - name: Node + description: Node management + - name: Debug + description: Debugging configuration + +paths: + "/connect/{peerId}": + get: + summary: "Connect to a peer" + description: | + If `addrs` param is supplied, it will be used to dial the peer, otherwise the `peerId` is used + to invoke peer discovery, if it succeeds the returned addresses will be used to dial. + tags: [ Node ] + operationId: connectPeer + parameters: + - in: path + name: peerId + required: true + schema: + $ref: "#/components/schemas/PeerId" + description: Peer that should be dialed. + - in: query + name: addrs + schema: + type: array + nullable: true + items: + $ref: "#/components/schemas/MultiAddress" + description: | + If supplied, it will be used to dial the peer. + The address has to target the listening address of the peer, + which is specified with the `--listen-addrs` CLI flag. + + responses: + "200": + description: Successfully connected to peer + "400": + description: Peer either not found or was not possible to dial + + "/data": + get: + summary: "Lists manifest CIDs stored locally in node." + tags: [ Data ] + operationId: listData + responses: + "200": + description: Retrieved list of content CIDs + content: + application/json: + schema: + $ref: "#/components/schemas/DataList" + + "400": + description: Invalid CID is specified + "404": + description: Content specified by the CID is not found + "500": + description: Well it was bad-bad + post: + summary: "Upload a file in a streaming manner. 
Once finished, the file is stored in the node and can be retrieved by any node in the network using the returned CID." + tags: [ Data ] + operationId: upload + requestBody: + content: + application/octet-stream: + schema: + type: string + format: binary + responses: + "200": + description: CID of uploaded file + content: + text/plain: + schema: + type: string + "500": + description: Well it was bad-bad and the upload did not work out + + "/data/{cid}": + get: + summary: "Download a file from the local node in a streaming manner. If the file is not available locally, a 404 is returned." + tags: [ Data ] + operationId: downloadLocal + parameters: + - in: path + name: cid + required: true + schema: + $ref: "#/components/schemas/Cid" + description: File to be downloaded. + + responses: + "200": + description: Retrieved content specified by CID + content: + application/octet-stream: + schema: + type: string + format: binary + "400": + description: Invalid CID is specified + "404": + description: Content specified by the CID is unavailable locally + "500": + description: Well it was bad-bad + + "/data/{cid}/network": + get: + summary: "Download a file from the network in a streaming manner. If the file is not available locally, it will be retrieved from other nodes in the network if able." + tags: [ Data ] + operationId: downloadNetwork + parameters: + - in: path + name: cid + required: true + schema: + $ref: "#/components/schemas/Cid" + description: "File to be downloaded." + responses: + "200": + description: Retrieved content specified by CID + content: + application/octet-stream: + schema: + type: string + format: binary + "400": + description: Invalid CID is specified + "404": + description: Content specified by the CID is not found + "500": + description: Well it was bad-bad + + "/space": + get: + summary: "Gets a summary of the storage space allocation of the node." 
+ tags: [ Data ] + operationId: space + responses: + "200": + description: "Summary of storage allocation" + content: + application/json: + schema: + $ref: "#/components/schemas/Space" + + "500": + description: "It's not working as planned" + + "/sales/slots": + get: + summary: "Returns active slots" + tags: [ Marketplace ] + operationId: getActiveSlots + responses: + "200": + description: Retrieved active slots + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Slot" + + "503": + description: Sales are unavailable + + "/sales/slots/{slotId}": + get: + summary: "Returns active slot with id {slotId} for the host" + tags: [ Marketplace ] + operationId: getActiveSlotById + parameters: + - in: path + name: slotId + required: true + schema: + $ref: "#/components/schemas/Cid" + description: File to be downloaded. + responses: + "200": + description: Retrieved active slot + content: + application/json: + schema: + $ref: "#/components/schemas/Slot" + + "400": + description: Invalid or missing SlotId + + "404": + description: Host is not in an active sale for the slot + + "503": + description: Sales are unavailable + + "/sales/availability": + get: + summary: "Returns storage that is for sale" + tags: [ Marketplace ] + operationId: getOfferedStorage + responses: + "200": + description: Retrieved storage availabilities of the node + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/SalesAvailability" + "500": + description: Error getting unused availabilities + "503": + description: Sales are unavailable + + post: + summary: "Offers storage for sale" + operationId: offerStorage + tags: [ Marketplace ] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/SalesAvailabilityCREATE" + responses: + "201": + description: Created storage availability + content: + application/json: + schema: + $ref: "#/components/schemas/SalesAvailabilityREAD" + "400": + description: 
Invalid data input + "422": + description: Not enough node's storage quota available + "500": + description: Error reserving availability + "503": + description: Sales are unavailable + "/sales/availability/{id}": + patch: + summary: "Updates availability" + description: | + The new parameters will only be considered for new requests. + Existing Requests linked to this Availability will continue as is. + operationId: updateOfferedStorage + tags: [ Marketplace ] + parameters: + - in: path + name: id + required: true + schema: + type: string + description: ID of Availability + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/SalesAvailability" + responses: + "204": + description: Availability successfully updated + "400": + description: Invalid data input + "404": + description: Availability not found + "422": + description: Not enough node's storage quota available + "500": + description: Error reserving availability + "503": + description: Sales are unavailable + + "/sales/availability/{id}/reservations": + patch: + summary: "Get availability's reservations" + description: Returns a list of Reservations for ongoing Storage Requests that the node hosts.
+ operationId: getReservations + tags: [ Marketplace ] + parameters: + - in: path + name: id + required: true + schema: + type: string + description: ID of Availability + responses: + "200": + description: Retrieved storage availabilities of the node + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Reservation" + "400": + description: Invalid Availability ID + "404": + description: Availability not found + "500": + description: Error getting reservations + "503": + description: Sales are unavailable + + "/storage/request/{cid}": + post: + summary: "Creates a new Request for storage" + tags: [ Marketplace ] + operationId: createStorageRequest + parameters: + - in: path + name: cid + required: true + schema: + $ref: "#/components/schemas/Cid" + description: CID of the uploaded data that should be stored + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/StorageRequestCreation" + responses: + "200": + description: Returns the Request ID as decimal string + content: + text/plain: + schema: + type: string + "400": + description: Invalid or missing Request ID + "404": + description: Request ID not found + "503": + description: Purchasing is unavailable + + "/storage/purchases": + get: + summary: "Returns list of purchase IDs" + tags: [ Marketplace ] + operationId: getPurchases + responses: + "200": + description: Gets all purchase IDs stored in node + content: + application/json: + schema: + type: array + items: + type: string + "503": + description: Purchasing is unavailable + + "/storage/purchases/{id}": + get: + summary: "Returns purchase details" + tags: [ Marketplace ] + operationId: getPurchase + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Hexadecimal ID of a Purchase + responses: + "200": + description: Purchase details + content: + application/json: + schema: + $ref: "#/components/schemas/Purchase" + "400": + description: Invalid or 
missing Purchase ID + "404": + description: Purchase not found + "503": + description: Purchasing is unavailable + + "/node/spr": + get: + summary: "Get Node's SPR" + operationId: getSPR + tags: [ Node ] + responses: + "200": + description: Node's SPR + content: + text/plain: + schema: + $ref: "#/components/schemas/SPR" + application/json: + schema: + $ref: "#/components/schemas/SPRRead" + "503": + description: Node SPR not ready, try again later + + "/node/peerid": + get: + summary: "Get Node's PeerID" + operationId: getPeerId + tags: [ Node ] + responses: + "200": + description: Node's Peer ID + content: + text/plain: + schema: + $ref: "#/components/schemas/PeerId" + application/json: + schema: + $ref: "#/components/schemas/PeerIdRead" + + "/debug/chronicles/loglevel": + post: + summary: "Set log level at run time" + tags: [ Debug ] + operationId: setDebugLogLevel + + parameters: + - in: query + name: level + required: true + schema: + $ref: "#/components/schemas/LogLevel" + + responses: + "200": + description: Log level successfully set + "400": + description: Invalid or missing log level + "500": + description: Well it was bad-bad + + "/debug/info": + get: + summary: "Gets node information" + operationId: getDebugInfo + tags: [ Debug ] + responses: + "200": + description: Node's information + content: + application/json: + schema: + $ref: "#/components/schemas/DebugInfo" diff --git a/tests/asynctest.nim b/tests/asynctest.nim new file mode 100644 index 00000000..7c6a4afd --- /dev/null +++ b/tests/asynctest.nim @@ -0,0 +1,3 @@ +import pkg/asynctest/chronos/unittest + +export unittest diff --git a/tests/checktest.nim b/tests/checktest.nim new file mode 100644 index 00000000..8ca5c53e --- /dev/null +++ b/tests/checktest.nim @@ -0,0 +1,28 @@ +import ./helpers + +## Unit testing suite that calls checkTrackers in teardown to check for memory leaks using chronos trackers.
+template checksuite*(name, body) = + suite name: + proc suiteProc = + multisetup() + + teardown: + checkTrackers() + + body + + suiteProc() + +template asyncchecksuite*(name, body) = + suite name: + proc suiteProc = + asyncmultisetup() + + teardown: + checkTrackers() + + body + + suiteProc() + +export helpers diff --git a/tests/circuits/fixtures/input.json b/tests/circuits/fixtures/input.json new file mode 100644 index 00000000..0de9bcfc --- /dev/null +++ b/tests/circuits/fixtures/input.json @@ -0,0 +1,527 @@ +{ + "dataSetRoot": "16074246370508166450132968585287196391860062495017081813239200574579640171677" +, "entropy": "1234567" +, "nCellsPerSlot": 512 +, "nSlotsPerDataSet": 11 +, "slotIndex": 3 +, "slotRoot": "20744935707483803411869804102043283881376973626291244537230284476834672019997" +, "slotProof": + [ "14279309641024220656349577745390262299143357053971618723978902485113885925133" + , "17350220251883387610715080716935498684002984280929482268590417788651882821293" + , "3614172556528990402229172918446087216573760062512459539027101853103043539066" + , "9593656216696187567506330076677122799107266567595923589824071605501987205034" + , "0" + , "0" + , "0" + , "0" + ] +, "cellData": + [ [ "211066599696340205996365563960462032209214145564176017965177408819390441927" + , "256399834032317991719099034134771774537377713676282398278615627599320306708" + , "40526956212941839024868120947422067322935297516255336725720469887577875470" + , "369406626072040375689238003388146123438765868500054546379159741776926336393" + , "333671948877941061129138970028865844558745266314244224980179694307884999701" + , "12844191661993811401197260475054004253686019503294245287625435635597431176" + , "103242505924551040184986153577926463300854479121445566984435820844932904541" + , "357267598134410031301285377503939045679462829143562392803919346036584141082" + , "162961530392479745020607594774288130869393650464001426863668385541077786641" + , 
"426633666684068667053061341108297711407154520752589357873877687002123230254" + , "131217200633679697678620573903653316618851166977399592012996945149000115543" + , "347806146382240882424437318028620747118202052207615339887983883245341422889" + , "373560143578047669373240014805175743607410583882636003337120578008437374619" + , "188643112726610812698950916978832639394489168469865816132268079928321994342" + , "261643693073361806247543578456646407192325702146752104760117340650316255422" + , "260425332276127964154119199351807753107358504026338378706506655351595199132" + , "374895089121103633563000003194929058314955925591115574099805066048344387554" + , "251251687633538360627384151887287228926166832908633974282160065378311171354" + , "72870348025150463132527129203816383011245664502016571773976961034605631401" + , "234969517550818492643515432666311755833657223377594670026839098818269671638" + , "250704662734070531273640113211555977086430125469327371276827055724111014200" + , "85287059658255939741261887818611116570376488685014337052369839580946343903" + , "148959658976765873884541474400081762855732313547557243964921157967555048302" + , "402116967301520959272239788104745348344918207829380669065019055837727389479" + , "440503687192139964066297025050080823601005280790824714962651368433530759519" + , "149064344353438643307231355233484617851197634669308957706338957575177745645" + , "249140840255377018814348914718469942883867200392561109698520706525194687651" + , "108796851176515124780842490199733462942992752881710253277179665118758071359" + , "168245155425564161902686596247453762364112240129335852645432169468767513906" + , "129930759770229612264501691941454447321585806001002088749773920103899362070" + , "26204732465033162738545933008873662562758704699080684615280202127289894343" + , "434343986187775289860542785529657444690073100436887472033336117760907652966" + , "361202432740487795596808128962740911600093857340619816047190021218849540225" + , 
"100616813001075101816809823591611435583084916492802624686700765550893945525" + , "262383566766515164611427346701355047932794351290691325516723194829671679460" + , "223966317895663049002893008659178463136086169222436544014405848127792334099" + , "416071800998333357259662686053338495720384342746822618737948080251761863079" + , "402582378631671531909245563300554883898015688826468151075059467077182712018" + , "271682867846395286938993638577506552857925968097084028550962231439839229096" + , "447239701195120035004067146414333903010840427278848992921567785105217019890" + , "275718367441702584521943480326858762208121719038684001399322597215141179102" + , "86424132534636958411139699704035287483951667275825975536719519441147485752" + , "149332313586183975744469174256094358432564607635406143904268565140988616920" + , "431284330776421588418608279008210842837281123158951642164956884286883748089" + , "328694410745471749523135644660455669483988686888634622076863114197617693825" + , "112671940998917362968156144648543607958275336559773039070338509488400224090" + , "40612851250697989627190554726382690498263128439797780029697069621854862060" + , "235047914228675997216342104196597257178021277585839376175077878186492271543" + , "169718735151210135199527910197065439221144015957220768545119706561079163228" + , "345850109040121443591415752965486014695272086901102608769402892906795715635" + , "107916794601837835951508003838161872232087225679609623116098837565956752373" + , "415195406060373162425374246348197423165008252112453298469572523506488563795" + , "18574536720926634955170276058049993354780447816096707123565164996905722992" + , "77316964375201096801231570737992491072607736322255206149311341101525354519" + , "198566211140075666401818378905444403588640345933666108724809349396921957675" + , "71926050707400318807625168942501384117254391471312636171205913503928815127" + , "303403754792341398295052586417779761162194818661412867094096550880325459639" + , 
"444796230931706624375881141460151785952798771079111017071124833045933389733" + , "430832727519144759265424205289522948157007336118070755365887670730658782114" + , "75431213985866235726407973044434444984663930761207296437571668004273515965" + , "9242635103653159191249730220870735855877366952081272723035956668095954838" + , "93770678769846326584848478152412123903909949977598807336203128684179492141" + , "438043261966019084676377174988310087831395864304423411701911688757793135582" + , "175357918416563657734972138036003712114814934655676571874083109097142591069" + , "301619681954194702458985259161884119574424456150215738560639417824784261940" + , "376627771252167062559065889174364784087884871999807562320457079200311413098" + , "77407" + ] + , [ "424838222746158988229624788694939151178615656210585621868910231014323837551" + , "113188486246955346418956949679485575685258850346101035778277727456423482970" + , "275449978206132565019222655023969430014622832597654643418394602485645803413" + , "407856757018138010439232009766252068440920591566039673660874626878413077905" + , "433369046493777496016384947949950877133856528218602671493669395706908819748" + , "258364166531180422149545015891786023981872586904946376136311798402581278793" + , "111997719221028147522803956659709775148434180015507797582340706052412284571" + , "370086597976426508280413876667101417393598181708677558733730556109327409076" + , "394139601979449259075910238117153992797849553309541269624848742084205563806" + , "224088276319080487199395482893988152025671468715318212801266537036640477323" + , "412710245119809501914481942088314642684754542082140451180970198371889738885" + , "353872602359946553306242341162348980635834907495492814598834657175405697176" + , "252575199506028475372678621140654219936768774012823764047177692104580452933" + , "259093153824033122452869556249315839899366493071746219770487886456301642099" + , "433829976798312333371154167497560676982294392632725612538640639617101218872" + , 
"69918581382122563555200898078544150952625715196904114153232367538572342772" + , "337937520623192257595352158476909569245988839238498098464935654555688460123" + , "264739973232292969253318643276671532055689422253184191167449284055983944338" + , "326116252818829775096345069850111970510714050346103409479803743342138260656" + , "236666502377413649728378889488706275212721356921124776708954261777813709815" + , "211625935799984260567718228446525455893664313064841539301444509150157287163" + , "60213206239417039999880027112341976360540689886703427811513517396638607512" + , "68310118105957780876770075529546844404225720757669797609686816545988561625" + , "423863085551351065136684030270731679105571943009795949621903966660399419936" + , "388914614294393005039878123500859325222684672184567792659076815268598434245" + , "449456790291560508709069826219925144971979653209755565240911568965768874382" + , "448810363770763694447869940916735951256986784286793489549428379909616059117" + , "93646909783664049092056237949587618925209622020026157405117796611689551192" + , "352210795298632954574896499649181574584074853828419384742874364724522457331" + , "37455517056393404525863484733101879886413925183061645520768912552476716150" + , "386617357584684336812125385078476270301738184058813703112840991226785114117" + , "309940292044597334261558429176136686101590729982259514472573656131579113438" + , "375815246167575100319857872432879650174355611853064771241069582477717074415" + , "332214507344122806007757734266883566559371568252728459951766124888176633706" + , "148990259460952914990881100852534318351247069504848477833147446514732789712" + , "328669527889838880414072022433859139004058211332184916573516704632073044118" + , "39278026039348543645873027549112998051601664395028652771103624299930924528" + , "147717660530589785119644237145092759103012624422229579698752386490700965238" + , "374018518701362017594752095877197725242352803195413267746619111489936980685" + , 
"19185486483883210730969367354195688373879769005461272861759636600984416877" + , "61866046506558157021682973167090213780467780519546382332208868591026703563" + , "186854966504766517012887726614015646154225796572138017810371160981778288347" + , "87813550250328892091332566928942844770632705056120813488729800874811845697" + , "207775163424060266085108794048371834145545842567796157378282772999548202308" + , "369987573847786237689249538753881486995686208870889713177975415012214427429" + , "240880979016395044518849230927615466120209140082149273390921042537474853143" + , "174902051454932913375934735427101804474275543418199101786687925733405159872" + , "342217255652950822372803598682842961053537267723988087801275319754065261308" + , "403207518067666945448161377960706451817747922771285796668778802535227939962" + , "407191459999036791052261270163259267557900498930952521056725210031161568230" + , "338216472523551728793268225845396218561132966393725938551091882807069657206" + , "118364222678576550870375112494142500603091119946985793934499723872824782886" + , "269721611028665323587192624288165848310917029418261851436925829954710472436" + , "227424498125745236125352117206136621428869662458452610254773560636280935711" + , "334380807433339401906359555583987757411855090694162252260781648609761248049" + , "42470806516174819075107446234449247453971524726021445768611797530804156161" + , "418994916402918322951830716592888390611524984156817012683478842068581638820" + , "363263142412048420546716019774090729399081311227606555141174736853886128407" + , "192292715923468025058557166341646729623133127372303791236447550026886802680" + , "450253878713722337767292128303865371116770532625906889925779639839924402495" + , "412596147086332805611392200560087191411541689130482740065137300252639590489" + , "264059866105067484811456793906835462997849523506366903974724979936196358724" + , "80922384260325792825608274004101667366364502159441286540209512108302572137" + , 
"69261112192907071823876894642934902051254647002357333297635511793652550535" + , "342810644068354896385837929029331085645928473943142618800192452300937098227" + , "228361826362950356801549793202622774869100858404479869989505302905936946659" + , "89244" + ] + , [ "359926778925154809567585559910064420821311221299937568759183366972901588855" + , "128688825421597555299113005875649976344934035310192572516197551102045945994" + , "225379354482053502673695304662016054746658452775856157938886424152134693969" + , "321872319934407904743844034025550739031781361848065513098922085967524180784" + , "250375637325405951645070615947051799520095842815922754899017741501395744611" + , "97676493052788080672307677749501730337551225267342432472194527468634722352" + , "140101187036396881926000630022834247990512766860086619783437252676747730662" + , "428833039353549335335605804240430918631639449874968791377641834408506136850" + , "418359203734539413977740838354554804415161215624809316660001820037711273005" + , "197411877418795659129213175603102238904459737200167987167255995825203749339" + , "221646252100316559257470803343058462853641953756978011126414924636869625612" + , "106393540293584181037890192557883541231531964825708650997196071036779482686" + , "121473330828208543539643554911190528237124877123871673169194452404939833883" + , "234055622144947293638512253368547046093971383516706577723613696162225606040" + , "68307451767502390304445005915787226559811433450721625085741437522802389574" + , "446891883436763112014492564462451523127134145501201571918449345324780633462" + , "83718652783543018019599555197511164121642363321504039439786358267060414949" + , "90267297500929836073049162292104311427365986272517761342871530827272320168" + , "398425606698859520268856768787424690872952910789754531465894080258173664751" + , "323570139379118444589557840594603212198136718240764273769210492735883659788" + , "318597103584099056378057647488068323974418467250708490151864712850204121402" + , 
"6299083430773359277240726214182464517380839990956316267425262319606033077" + , "27638206326925436960316131682014727983280820447721477666884742925275976240" + , "434344186848869917381375812528446841024676181532946456237439060027443649574" + , "64735754118644738348599761514174981324344130078598038275246522384474432918" + , "53068717269762105498508401788249005564862415051761175636612434108259085043" + , "35813044996911619267309099508360887777226716179396659295580849861836012116" + , "67751791392924142809580984450371772015056060429352159361446914484238646676" + , "68534949135677447506316576616742207938855454921330757052467057435206318183" + , "98510151949547604999069864337574320742530406479752176012935179772005228326" + , "342190252152505345443004241184891966319091967630257822491352072978326623645" + , "362701658859425316334005554473186516818256386066010799465369887406035738447" + , "266999116654850467726292928465517542818678046748008340458185725047959981772" + , "227089355966197874086821090531951502393729872265201602128464978982907992285" + , "240800343500959216904535208047288234867926058830277460630902462914796702354" + , "447956858573680756485556898469710354624642653441041335815079102198306530583" + , "89422712944117481549242421245588048728782658978853365197341587057196539094" + , "72610343179362050463955730204044877712105879926618304878262944723464870506" + , "8676698500519447254981838968537883138182811064381378248657969913325524054" + , "180453700216061196739413267121764366438386031039311941313645439527087166894" + , "63346784016053253727621352882430122335280702556586808389293772066791583857" + , "400031453850139978805133735852120986024101930860924735862305444817383365395" + , "230104622290558570218036071349472289358926019290368625724986905348610140188" + , "175689489221336091369196327293045073133701056385414159213338224521167050830" + , "73310331103509697419315970265031228794034932318600293733074730812482185479" + , 
"371383255255842707875498538452907102684511927320158672347778293876877893808" + , "165319345890230193939972313953881372394171342391835626454759321320435952720" + , "184753541001210613115361457830691571384268642766106935428207829332011259768" + , "378810733004878432563271790625801205570962762395854282745166380274493181314" + , "86321674336629444862383262780020871828941143514651008200999725989892879714" + , "332634533993388248915777870988529817692938793120418377552247997050250349749" + , "41742010257820712511267405684781534740157292266212120171929853763272599516" + , "224101330592139734390658213442359402546038743346294438455635537496290117560" + , "204363902046137087420878796391135987770418514008394389852388361468850216359" + , "296526036888463156867153847102999031430641220536577556044554029990778763710" + , "137568796227115931047082828377698464185467276723279763304686078869351280509" + , "147456720891843338735232645318045656082153590170441596326500204393398792771" + , "297291342309626392877004635010131510068476699687818509485687346071074942006" + , "20748013593486831233186810444485136836664689496258849465507672301203832324" + , "335431726883875036252568773858744219410188302724423071283643221351691013313" + , "50487384098835523033417523223562560857744547945136829388944024807752630716" + , "425952679139710019732649156938676226492714486376811103362264146658191708598" + , "439787938069461539508728805881194071103269524405653997968488488049426387373" + , "279863410796988495259178322026394289028023166886112128504434877538089779477" + , "398941099058270093463626870965433502581301413791423667994257456160377865247" + , "5759692644185723491187478313102540786562625675495805072053262277490225012" + , "115176" + ] + , [ "199440901482393381753657315848210955092644686957900737971520778206058989647" + , "339215657660349719251187938243949119362753238126598750470832739830379601048" + , "17957417011314891245400567671664723859427624136284133790923936126779445290" + , 
"294761585889095249928492042608516765584723814657871392207964321318076158536" + , "367304199921887970655898837985855454346911090426896946930048519042744277770" + , "173405546837606747721292792526074597519538685230485266741646923399938591491" + , "13202798104529529703580600642858924379886936325402696094624200032343206719" + , "28211272278315691894282764239983301742024079691520980592618486887749800025" + , "73792448247120972778500624350664849847034095641998271809779791788652649022" + , "386961947078838359430674078072441680475090687247027225632133013772954043342" + , "247859266401821616700765969075270662915024391205665146199401830650793676517" + , "243938047874995926342875119559105623088213951205962677439167259642163766960" + , "14909501249861872673329370269106359532506159818320693170564856401208688898" + , "200331653478898243177761429526240803993101536716611440775588088625522029071" + , "127891684617049394579738365914860024509007913559921966744972525605976847919" + , "202912167983786187592861727924433749852786012202809544200943965898118027816" + , "176370650316309755425558132466370508977563252342126855874617990006444464573" + , "179490319446297562731655155074196383539396893457024237113284509223965454107" + , "118703379899134287650980989454755985628620830085932176414465582081598659194" + , "102025594191113707886629488454876652037341696284939671367279050169222988689" + , "421132375430104331136732058548913808473025321492255503838896123601628815453" + , "328334791815856213206267892694535121226673194052951034497930366807851111845" + , "83012322813281668737061895967682970093636853452224812061110092135287899376" + , "329204708391107220275172926348002826875172581958734129645882445887919889321" + , "410748869385474485539728045765785256294532845070137964737879849265390625591" + , "197274807717335387012872999914232051341799797613667869923402386359379902675" + , "235713095185988155553500217595661312303861624791720350423698435045853678746" + , 
"150631584359141913552843813384841153102535679219851913018874172414598353488" + , "207783836843813911284913666774420250626019971129676431904589416081127483900" + , "15728034718954665549174230921445077500399069880555428849958014406422697976" + , "69799423545177501667748653663121504155622623013014583690766769624482972893" + , "265665371394145560256710553779588458846778960884758560117805511822299802326" + , "149195925869584039415331014261414953602245337159354350672030141190471260449" + , "162328395279114064180857718453973759378615891406692054752029241989300597156" + , "104643123291849369328362168243087456326274773811561124607060302871149280568" + , "320704123383295724141970902124509237736831907552238934395000039155541269937" + , "77914486216152383968400591704791756847610018757857965758408455442143531631" + , "238365259321088298905088250444146071545398991768186471689605160523129613763" + , "279409375422154593510552116666741774885392805494152031820287626934209978908" + , "195118776021452609708543280509123101181249086555819844047203390788132717252" + , "197977884437087886153482042453896079316138251415359773453987232734849953584" + , "168185043240980087638006965666857387510828226074470344841850764460631595331" + , "231157923359356077977363679818678437836536420121744865399935742538602805912" + , "177903771863742191900138437188329108771172098110036075491750018158192424072" + , "313552174443290416730632310997197097951229162137491595709733420111980331403" + , "273253450712049988786741336540196077743997302924525995219038781650977490211" + , "421908030281055821389377531613150504859996607596444776050212044919345332385" + , "180108184992593746898140529947178182204857361841304042854173884504394805936" + , "37075272799330399065679301151342697855905822543084867570322173216259074746" + , "364885615491975468180698816037289079302391684470748470356247509218051645743" + , "397482868106190800111749908137311511782294652288655656060379563261618687603" + , 
"192853269627895017416381451198403197013798085262867793715522216458130791820" + , "450480853450142791928572953497280890976328598410525218090104787739409705079" + , "40278654070502961330170439514434669768416784968274956021022493731441898222" + , "251277143131769020481025315216040765839561111684608785317366609258959942695" + , "95094468748825454459610961968601800404132682484160170977941285794444806916" + , "160586633865113902389134480029183924655750088163491531655080014223476604929" + , "211661229493873434581423107377168478957907088187044576032505407590783850232" + , "409651293631434750272174456674594508340952460788494035327119354167465019826" + , "233213211946836553080627522409887799285199986120567245752841080275284294566" + , "143182900674482411759481361336063079267405785923487419697568907351386146653" + , "430050085956999990041799366370428972519385994997821389120583306252090911051" + , "241257468571530133762629460194062384921386438426367660851087853915892684115" + , "106478922860328643074356032194102718325829441005019365153538120054339275205" + , "252933430690486626644000908036895289326526510137514385824014300035301154822" + , "242924628511152862437189415942615812459145003511499320946981326550434266392" + , "107566" + ] + , [ "10892488375325920610152294701785926476935321890453222549428070867493882259" + , "230776541958253414701326765204413805639078570664616139597900921490475143840" + , "162235550819840758141721889536295480113278275911087429090017541814695333320" + , "318634611531007856220026646570492630940047240387334221027051366284225674524" + , "347695480420330337439096561080975864031317110001559084872978387417651465445" + , "243301070227446762125488369714708670219289121859111929877695012393609726208" + , "312153141205681954392441579373652470334482191990502803524039048245142338874" + , "243769659456658813016931656268556893289414122189859136776671772112510762644" + , "235510946617019540983239794402008700611014664578713646813426116406215382253" + , 
"394638234040056271265534896894991100558052611842099314808878257003754175212" + , "112730195097163222179949980634599934392634120069300673310070655800491242211" + , "112545144551723145227061757291353149296490850338535641681237178578430772288" + , "399161925498018051746424503488168548076537557369822821644005390567188305750" + , "291823556779130095044851238536860577785281148245623338113991004798525195947" + , "443006765181360772964201996695219533539991249361936300010158089981376772939" + , "74018417655448362012716269253153545524112649147848318337218250865231619883" + , "361038295627629757439073080656763633087074408866229383288288831546300069767" + , "269655542872834422597617091710145830035767834433114567250727497135451412216" + , "58289072717559976781527037332370898163550414718655391280446986067842922181" + , "365399954331278626941053447122605263207706937018525782908294459265663426953" + , "83576501872896181822963149907518188809522341045703363171261203985876068484" + , "203403783686919490357886887779316222746544665267595235714814853282937072937" + , "226090172488558641139782927103632452136731207206963592401497570070700221117" + , "249813560776802008219945831355163226098682741553305882699128946352901227282" + , "236586835155013316983057471119105606475813711035522306026673814184519826069" + , "420611449257527132395951061920868895981487081726689195426673433565077012458" + , "414979562418422585161189066520079202660418565693776869373399931253524536378" + , "115851377630895049619958829726730418778383692593973233290077769966938018584" + , "248071158447148977966329235335508229928647083339924308951291087789494073866" + , "8254100651607835318906499132096926759832050649688561036854000785129084907" + , "91385483239630999205401307763890943367451672856478206313711901403403429289" + , "369346641925770698935046191632374874762045839747680407985443471202350286304" + , "236809023553698844817613980395588655518336237009075203833474757804664254158" + , 
"8367847400805682648908349286699722579431227561083498361702537964306290078" + , "599241730770400067632779114435635342549228985534229813617556932580328166" + , "347112528350917448294202348829052279076907614831011498643025223836596915573" + , "384244379244118003891043669466323943736726794634167201471569326059716944701" + , "118013777197672343498581960057939216208494837962825017767101107204031333144" + , "27234916267695376599463409893017377196853108034589808756909998459364893467" + , "443519198016088819704735929590164254445884637317806160485888215392818578737" + , "396780482611567392375183169345153676737175342167284140440545202776279411157" + , "420351155303051883480975795437743307852799862858964108014000673383502660760" + , "17379377743250873773932440622865094720355292220433235366224143179854831702" + , "299671454782683147939928632170233327590769402224392134648893444626929909373" + , "143062753141414050359792615867774312517100868919516205179025540179759009492" + , "79497692490953838158801094558761984613913564034406069659969793097043605498" + , "422748645389700647011491406944374966856916994331478229959954030359911549565" + , "101802829812014644970197499895811874607753186302439171072935333706660468030" + , "376428369998893026519415315112012919906032811618495880392785036762185101192" + , "193969030999254195249242252871597931610859408264053152789041067245597391073" + , "262277607928686742238285487190873200602833495734085188071246746209841324139" + , "154099884960502807271641574310268486840763221700920893692135020347157046386" + , "155875061164585018658671842995328931296342883770473498362059838106632382461" + , "248574435283666782825705601259695525637993294311364397935499480206725256362" + , "171325185063038052248966557755722232979612702743265316145563443527135798688" + , "19982746887818897250405980185937061235439217109294376948752373205830077881" + , "363719103724181291346130833008745948141602173373912337315631878022251200824" + , 
"174596812883797666354579747966720458118607233660798323559531788300018084931" + , "296611197821867585469311917529661698312828606304722408477045992233526328708" + , "115884038550627840768260751168697204665962542002024023842649383174518895165" + , "265597417366164841889730505737916646261040505851159477903649773521314216810" + , "59890857222664166616144499778264545115438678877460924559608721809777680238" + , "150275344313515259978222149421664752546204516114655782705875535407472455999" + , "119762211657733951640135703777940013374803447228667976561992857374027112851" + , "124750313254270944205036764607428674226393862430770588148329786501973600535" + , "223562415856611692667255745278751292230311455355383541649772348032933666931" + , "70851" + ] + ] +, "merklePaths": + [ [ "12330511756602656312909435206144412037562550923861053314147193494605624608532" + , "11626412651279744307149389430094868876817959605147995292836995419396445628874" + , "5992799448428980485292103987999399446160713735809250167288027256759377161164" + , "19665782623633007009046423286205585606029554995684266199914536255425604862856" + , "16487082247902739801276638140231448150060608377815926330854457096549924699346" + , "13757776896542890425183206586238760231244468647794123671233758868377423038254" + , "5689382212199289790852870716648320118282913659439556386010574440270030991956" + , "19397819444005071538172083272808000152189643623980212079104170950073560541073" + , "13602141253349313166682170066159161155345481788270338632198566205283140117430" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + ] + , [ "20475873274412005481064639175076772466269555202404881992985142339142752174247" + , "16346160910918020455716115524174351294558871582658635761456705803199256575588" + , "2853750013041818757293788273269978233503065226276819540991640203844566736443" + , 
"9192572535522846104757923561847416578417599996904208474460700268961782558170" + , "11041850361074018956732434352769265303294549935255362322653487210796196161858" + , "20835509643844784930831626622202658364476409300598072395494952478408974334325" + , "15426115581767819720710837762133134950520914636073261355445708100826108573907" + , "7565353224987902191368863653499353764559862741092477570130316358454603122676" + , "2622681935585012630617774892501744551457568716225188460692779556142778732663" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + ] + , [ "13099868869639061574800067722436547911616384523753701384396275064543709640456" + , "353757809120595355213201328586632712724405232919181040928026587840976194078" + , "17653300914565730132855106316678548541847283141888106932466281385199556950861" + , "15467462085462582082877261755656498905479817107855355753427299990712166382496" + , "8291733777946446853018893495264026584437749231931118771866890345692346711355" + , "15510790697317206014779022286261864844918915222875014882833700758879700055506" + , "5689382212199289790852870716648320118282913659439556386010574440270030991956" + , "19397819444005071538172083272808000152189643623980212079104170950073560541073" + , "13602141253349313166682170066159161155345481788270338632198566205283140117430" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + ] + , [ "123238869181525326412236116167249816873084559218151119452851092131080991962" + , "9610314342084317647296061595824603740114670828357751076517430572434680540425" + , "16802740554584732104294972716558962567961331277692246600846665155168171370476" + , "151083360419914122898584757765086723506432610661508069194962432698872036623" + , 
"10357032992337239725601662829902169825217513617307319193581711776597892496381" + , "10120699018002766520605012835043517238241846918467244955580419060582311503402" + , "21149604008153751948441881526949680605328007895979738537313721955134548786062" + , "5720106921878932614189421948890362637585879521377362100104826996201092964473" + , "2622681935585012630617774892501744551457568716225188460692779556142778732663" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + ] + , [ "19153513112714782931111012244694922243101677748840856395814929006033044311081" + , "21046138187228318287277629107063936540039891592394801899272249280765102572688" + , "18057980437430910028015917806534512217725128031222973066601095455076586015436" + , "5766677914654397407881589917461473873246279171605373166264025525757502238061" + , "12019967669236656188577515900815533059046454955207846938479617973037184411021" + , "14504305765289705714959523666100275156034056689367568164630385862257567596209" + , "7152002871325824138073253423783370852632926621899161541618248808716037342022" + , "9714587356194206699401761190093056901650105401919163689816999407566849779455" + , "13602141253349313166682170066159161155345481788270338632198566205283140117430" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + , "0" + ] + ] +} diff --git a/tests/circuits/fixtures/proof_main.r1cs b/tests/circuits/fixtures/proof_main.r1cs new file mode 100644 index 00000000..8b58ffa8 Binary files /dev/null and b/tests/circuits/fixtures/proof_main.r1cs differ diff --git a/tests/circuits/fixtures/proof_main.wasm b/tests/circuits/fixtures/proof_main.wasm new file mode 100644 index 00000000..f908d4f7 Binary files /dev/null and b/tests/circuits/fixtures/proof_main.wasm differ diff --git 
a/tests/circuits/fixtures/proof_main.zkey b/tests/circuits/fixtures/proof_main.zkey new file mode 100644 index 00000000..e4b889e9 Binary files /dev/null and b/tests/circuits/fixtures/proof_main.zkey differ diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index d866de70..9ba29e5d 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -2,28 +2,32 @@ import std/sequtils import std/sugar import std/tables -import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/libp2p/errors import pkg/codex/rng import pkg/codex/stores import pkg/codex/blockexchange import pkg/codex/chunker +import pkg/codex/manifest +import pkg/codex/merkletree import pkg/codex/blocktype as bt -import ../../helpers/mockdiscovery - +import ../../../asynctest import ../../helpers +import ../../helpers/mockdiscovery import ../../examples -suite "Block Advertising and Discovery": + +asyncchecksuite "Block Advertising and Discovery": let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) var blocks: seq[bt.Block] + manifest: Manifest + tree: CodexTree + manifestBlock: bt.Block switch: Switch peerStore: PeerCtxStore blockDiscovery: MockDiscovery @@ -46,10 +50,16 @@ suite "Block Advertising and Discovery": blockDiscovery = MockDiscovery.new() wallet = WalletRef.example network = BlockExcNetwork.new(switch) - localStore = CacheStore.new(blocks.mapIt( it )) + localStore = CacheStore.new(blocks.mapIt(it)) peerStore = PeerCtxStore.new() pendingBlocks = PendingBlocksManager.new() + (manifest, tree) = makeManifestAndTree(blocks).tryGet() + manifestBlock = bt.Block.new( + manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + + (await localStore.putBlock(manifestBlock)).tryGet() + discovery = DiscoveryEngine.new( localStore, peerStore, @@ -82,26 +92,40 @@ suite "Block Advertising and Discovery": 
blockDiscovery.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = - await engine.resolveBlocks(blocks.filterIt( it.cid == cid )) + await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) await allFuturesThrowing( allFinished(pendingBlocks)) await engine.stop() - test "Should advertise have blocks": + test "Should advertise both manifests and trees": let + cids = @[manifest.cid.tryGet, manifest.treeCid] advertised = initTable.collect: - for b in blocks: {b.cid: newFuture[void]()} + for cid in cids: {cid: newFuture[void]()} - blockDiscovery.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = - if cid in advertised and not advertised[cid].finished(): - advertised[cid].complete() + blockDiscovery + .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = + if cid in advertised and not advertised[cid].finished(): + advertised[cid].complete() - await engine.start() # fire up advertise loop + await engine.start() await allFuturesThrowing( allFinished(toSeq(advertised.values))) + await engine.stop() + test "Should not advertise local blocks": + let + blockCids = blocks.mapIt(it.cid) + + blockDiscovery + .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = + check: + cid notin blockCids + + await engine.start() + await sleepAsync(3.seconds) await engine.stop() test "Should not launch discovery if remote peer has block": @@ -109,10 +133,10 @@ suite "Block Advertising and Discovery": pendingBlocks = blocks.mapIt( engine.pendingBlocks.getWantHandle(it.cid) ) - peerId = PeerID.example + peerId = PeerId.example haves = collect(initTable()): for blk in blocks: - { blk.cid: Presence(cid: blk.cid, price: 0.u256) } + { blk.address: Presence(address: blk.address, price: 0.u256) } engine.peers.add( BlockExcPeerCtx( @@ -124,31 +148,41 @@ suite "Block Advertising and Discovery": proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] = check false - await 
engine.start() # fire up discovery loop - engine.pendingBlocks.resolve(blocks) + await engine.start() + engine.pendingBlocks.resolve(blocks.mapIt(BlockDelivery(blk: it, address: it.address))) await allFuturesThrowing( allFinished(pendingBlocks)) await engine.stop() -suite "E2E - Multiple Nodes Discovery": - let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) +proc asBlock(m: Manifest): bt.Block = + let mdata = m.encode().tryGet() + bt.Block.new(data = mdata, codec = ManifestCodec).tryGet() +asyncchecksuite "E2E - Multiple Nodes Discovery": var switch: seq[Switch] blockexc: seq[NetworkStore] - blocks: seq[bt.Block] + manifests: seq[Manifest] + mBlocks: seq[bt.Block] + trees: seq[CodexTree] setup: - while true: - let chunk = await chunker.getBytes() - if chunk.len <= 0: - break - - blocks.add(bt.Block.new(chunk).tryGet()) - for _ in 0..<4: + let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) + var blocks = newSeq[bt.Block]() + while true: + let chunk = await chunker.getBytes() + if chunk.len <= 0: + break + + blocks.add(bt.Block.new(chunk).tryGet()) + let (manifest, tree) = makeManifestAndTree(blocks).tryGet() + manifests.add(manifest) + mBlocks.add(manifest.asBlock()) + trees.add(tree) + let s = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr}) blockDiscovery = MockDiscovery.new() @@ -182,51 +216,12 @@ suite "E2E - Multiple Nodes Discovery": teardown: switch = @[] blockexc = @[] + manifests = @[] + mBlocks = @[] + trees = @[] test "E2E - Should advertise and discover blocks": - # Distribute the blocks amongst 1..3 - # Ask 0 to download everything without connecting him beforehand - - var advertised: Table[Cid, SignedPeerRecord] - - MockDiscovery(blockexc[1].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised.add(cid, switch[1].peerInfo.signedPeerRecord) - - MockDiscovery(blockexc[2].engine.discovery.discovery) - 
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised.add(cid, switch[2].peerInfo.signedPeerRecord) - - MockDiscovery(blockexc[3].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised.add(cid, switch[3].peerInfo.signedPeerRecord) - - await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5]) - await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10]) - await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15]) - - MockDiscovery(blockexc[0].engine.discovery.discovery) - .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - result.add(advertised[cid]) - - let futs = collect(newSeq): - for b in blocks: - blockexc[0].engine.requestBlock(b.cid) - - await allFuturesThrowing( - switch.mapIt( it.start() ) & - blockexc.mapIt( it.engine.start() )) - - await allFutures(futs) - - await allFuturesThrowing( - blockexc.mapIt( it.engine.stop() ) & - switch.mapIt( it.stop() )) - - test "E2E - Should advertise and discover blocks with peers already connected": - # Distribute the blocks amongst 1..3 + # Distribute the manifests and trees amongst 1..3 # Ask 0 to download everything without connecting him beforehand var advertised: Table[Cid, SignedPeerRecord] @@ -243,9 +238,61 @@ suite "E2E - Multiple Nodes Discovery": .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = advertised[cid] = switch[3].peerInfo.signedPeerRecord - await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5]) - await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10]) - await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15]) + discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) + await 
blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid))]) + + discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid) + await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid))]) + + discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid) + await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid))]) + + MockDiscovery(blockexc[0].engine.discovery.discovery) + .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): + Future[seq[SignedPeerRecord]] {.async.} = + if cid in advertised: + result.add(advertised[cid]) + + let futs = collect(newSeq): + for m in mBlocks[0..2]: + blockexc[0].engine.requestBlock(m.cid) + + await allFuturesThrowing( + switch.mapIt(it.start()) & + blockexc.mapIt(it.engine.start())).wait(10.seconds) + + await allFutures(futs).wait(10.seconds) + + await allFuturesThrowing( + blockexc.mapIt(it.engine.stop()) & + switch.mapIt(it.stop())).wait(10.seconds) + + test "E2E - Should advertise and discover blocks with peers already connected": + # Distribute the blocks amongst 1..3 + # Ask 0 to download everything *WITH* connecting him beforehand + + var advertised: Table[Cid, SignedPeerRecord] + + MockDiscovery(blockexc[1].engine.discovery.discovery) + .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = + advertised[cid] = switch[1].peerInfo.signedPeerRecord + + MockDiscovery(blockexc[2].engine.discovery.discovery) + .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = + advertised[cid] = switch[2].peerInfo.signedPeerRecord + + MockDiscovery(blockexc[3].engine.discovery.discovery) + .publishBlockProvideHandler = proc(d: MockDiscovery, cid: 
Cid): Future[void] {.async.} = + advertised[cid] = switch[3].peerInfo.signedPeerRecord + + discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) + await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid))]) + + discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid) + await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid))]) + + discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid) + await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid))]) MockDiscovery(blockexc[0].engine.discovery.discovery) .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): @@ -254,14 +301,14 @@ suite "E2E - Multiple Nodes Discovery": return @[advertised[cid]] let - futs = blocks.mapIt( blockexc[0].engine.requestBlock( it.cid ) ) + futs = mBlocks[0..2].mapIt(blockexc[0].engine.requestBlock(it.cid)) await allFuturesThrowing( - switch.mapIt( it.start() ) & - blockexc.mapIt( it.engine.start() )) + switch.mapIt(it.start()) & + blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) await allFuturesThrowing( - blockexc.mapIt( it.engine.stop() ) & - switch.mapIt( it.stop() )) + blockexc.mapIt(it.engine.stop()) & + switch.mapIt(it.stop())).wait(10.seconds) diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index a8f59fec..ff6b60d2 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -2,10 +2,7 @@ import std/sequtils import std/sugar import std/tables -import pkg/asynctest - import pkg/chronos -import pkg/libp2p import 
pkg/codex/rng import pkg/codex/stores @@ -13,22 +10,30 @@ import pkg/codex/blockexchange import pkg/codex/chunker import pkg/codex/blocktype as bt import pkg/codex/blockexchange/engine +import pkg/codex/manifest +import pkg/codex/merkletree -import ../../helpers/mockdiscovery - +import ../../../asynctest import ../../helpers +import ../../helpers/mockdiscovery import ../../examples -suite "Test Discovery Engine": +proc asBlock(m: Manifest): bt.Block = + let mdata = m.encode().tryGet() + bt.Block.new(data = mdata, codec = ManifestCodec).tryGet() + +asyncchecksuite "Test Discovery Engine": let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) var blocks: seq[bt.Block] + manifest: Manifest + tree: CodexTree + manifestBlock: bt.Block switch: Switch peerStore: PeerCtxStore blockDiscovery: MockDiscovery pendingBlocks: PendingBlocksManager - localStore: CacheStore network: BlockExcNetwork setup: @@ -39,6 +44,10 @@ suite "Test Discovery Engine": blocks.add(bt.Block.new(chunk).tryGet()) + (manifest, tree) = makeManifestAndTree(blocks).tryGet() + manifestBlock = manifest.asBlock() + blocks.add(manifestBlock) + switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr}) network = BlockExcNetwork.new(switch) peerStore = PeerCtxStore.new() @@ -55,11 +64,11 @@ suite "Test Discovery Engine": blockDiscovery, pendingBlocks, discoveryLoopSleep = 100.millis) - wants = blocks.mapIt( pendingBlocks.getWantHandle(it.cid) ) + wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid) ) blockDiscovery.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - pendingBlocks.resolve(blocks.filterIt( it.cid == cid)) + pendingBlocks.resolve(blocks.filterIt(it.cid == cid).mapIt(BlockDelivery(blk: it, address: it.address))) await discoveryEngine.start() await allFuturesThrowing(allFinished(wants)).wait(1.seconds) @@ -67,7 +76,7 @@ suite "Test Discovery Engine": test "Should Advertise Haves": var - localStore = 
CacheStore.new(blocks.mapIt( it )) + localStore = CacheStore.new(blocks.mapIt(it)) discoveryEngine = DiscoveryEngine.new( localStore, peerStore, @@ -76,8 +85,8 @@ suite "Test Discovery Engine": pendingBlocks, discoveryLoopSleep = 100.millis) haves = collect(initTable): - for b in blocks: - { b.cid: newFuture[void]() } + for cid in @[manifestBlock.cid, manifest.treeCid]: + { cid: newFuture[void]() } blockDiscovery.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} = @@ -112,28 +121,6 @@ suite "Test Discovery Engine": await want.wait(1.seconds) await discoveryEngine.stop() - test "Should queue advertise request": - var - localStore = CacheStore.new(@[blocks[0]]) - discoveryEngine = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks, - discoveryLoopSleep = 100.millis) - have = newFuture[void]() - - blockDiscovery.publishBlockProvideHandler = - proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} = - check cid == blocks[0].cid - if not have.finished: - have.complete() - - await discoveryEngine.start() - await have.wait(1.seconds) - await discoveryEngine.stop() - test "Should not request more than minPeersPerBlock": var localStore = CacheStore.new() @@ -154,9 +141,11 @@ suite "Test Discovery Engine": check cid == blocks[0].cid check peerStore.len < minPeers var - peerCtx = BlockExcPeerCtx(id: PeerID.example) + peerCtx = BlockExcPeerCtx(id: PeerId.example) - peerCtx.blocks[cid] = Presence(cid: cid, price: 0.u256) + let address = BlockAddress(leaf: false, cid: cid) + + peerCtx.blocks[address] = Presence(address: address, price: 0.u256) peerStore.add(peerCtx) want.fire() diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim index 9965979b..105f3a8e 100644 --- a/tests/codex/blockexchange/engine/testblockexc.nim +++ b/tests/codex/blockexchange/engine/testblockexc.nim @@ -1,13 +1,9 @@ import std/sequtils import std/algorithm -import pkg/asynctest 
import pkg/chronos import pkg/stew/byteutils -import pkg/libp2p -import pkg/libp2p/errors - import pkg/codex/rng import pkg/codex/stores import pkg/codex/blockexchange @@ -15,14 +11,11 @@ import pkg/codex/chunker import pkg/codex/discovery import pkg/codex/blocktype as bt +import ../../../asynctest import ../../examples import ../../helpers -suite "NetworkStore engine - 2 nodes": - let - chunker1 = RandomChunker.new(Rng.instance(), size = 2048, chunkSize = 256) - chunker2 = RandomChunker.new(Rng.instance(), size = 2048, chunkSize = 256) - +asyncchecksuite "NetworkStore engine - 2 nodes": var nodeCmps1, nodeCmps2: NodesComponents peerCtx1, peerCtx2: BlockExcPeerCtx @@ -31,20 +24,8 @@ suite "NetworkStore engine - 2 nodes": pendingBlocks1, pendingBlocks2: seq[Future[bt.Block]] setup: - while true: - let chunk = await chunker1.getBytes() - if chunk.len <= 0: - break - - blocks1.add(bt.Block.new(chunk).tryGet()) - - while true: - let chunk = await chunker2.getBytes() - if chunk.len <= 0: - break - - blocks2.add(bt.Block.new(chunk).tryGet()) - + blocks1 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) + blocks2 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) nodeCmps1 = generateNodes(1, blocks1)[0] nodeCmps2 = generateNodes(1, blocks2)[0] @@ -123,10 +104,11 @@ suite "NetworkStore engine - 2 nodes": test "Should send want-have for block": let blk = bt.Block.new("Block 1".toBytes).tryGet() + let blkFut = nodeCmps1.pendingBlocks.getWantHandle( blk.cid ) (await nodeCmps2.localStore.putBlock(blk)).tryGet() - let entry = Entry( - `block`: blk.cid.data.buffer, + let entry = WantListEntry( + address: blk.address, priority: 1, cancel: false, wantType: WantType.WantBlock, @@ -139,6 +121,7 @@ suite "NetworkStore engine - 2 nodes": .pushOrUpdateNoWait(peerCtx1).isOk check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet() + check eventually (await blkFut) == blk test "Should get blocks from remote": let @@ -169,13 +152,10 @@ suite 
"NetworkStore engine - 2 nodes": nodeCmps2.networkStore.putBlock(it) )) - let - blocks = await allFinished( - blocks2[4..7].mapIt( - nodeCmps1.networkStore.getBlock(it.cid) - )) - - # await sleepAsync(10.seconds) + discard await allFinished( + blocks2[4..7].mapIt( + nodeCmps1.networkStore.getBlock(it.cid) + )) let channel = !peerCtx1.paymentChannel @@ -183,43 +163,31 @@ suite "NetworkStore engine - 2 nodes": check eventually wallet.balance(channel, Asset) > 0 -suite "NetworkStore - multiple nodes": - let - chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) - +asyncchecksuite "NetworkStore - multiple nodes": var - switch: seq[Switch] - networkStore: seq[NetworkStore] + nodes: seq[NodesComponents] blocks: seq[bt.Block] setup: - while true: - let chunk = await chunker.getBytes() - if chunk.len <= 0: - break - - blocks.add(bt.Block.new(chunk).tryGet()) - - for e in generateNodes(5): - switch.add(e.switch) - networkStore.add(e.networkStore) + blocks = await makeRandomBlocks(datasetSize = 4096, blockSize = 256'nb) + nodes = generateNodes(5) + for e in nodes: await e.engine.start() await allFuturesThrowing( - switch.mapIt( it.start() ) + nodes.mapIt( it.switch.start() ) ) teardown: await allFuturesThrowing( - switch.mapIt( it.stop() ) + nodes.mapIt( it.switch.stop() ) ) - switch = @[] - networkStore = @[] + nodes = @[] test "Should receive blocks for own want list": let - downloader = networkStore[4] + downloader = nodes[4].networkStore engine = downloader.engine # Add blocks from 1st peer to want list @@ -237,9 +205,9 @@ suite "NetworkStore - multiple nodes": ) for i in 0..15: - (await networkStore[i div 4].engine.localStore.putBlock(blocks[i])).tryGet() + (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() - await connectNodes(switch) + await connectNodes(nodes) await sleepAsync(1.seconds) await allFuturesThrowing( @@ -255,7 +223,7 @@ suite "NetworkStore - multiple nodes": test "Should exchange blocks with multiple 
nodes": let - downloader = networkStore[4] + downloader = nodes[4].networkStore engine = downloader.engine # Add blocks from 1st peer to want list @@ -268,9 +236,9 @@ suite "NetworkStore - multiple nodes": ) for i in 0..15: - (await networkStore[i div 4].engine.localStore.putBlock(blocks[i])).tryGet() + (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() - await connectNodes(switch) + await connectNodes(nodes) await sleepAsync(1.seconds) await allFuturesThrowing( @@ -279,3 +247,47 @@ suite "NetworkStore - multiple nodes": check pendingBlocks1.mapIt( it.read ) == blocks[0..3] check pendingBlocks2.mapIt( it.read ) == blocks[12..15] + + test "Should actively cancel want-haves if block received from elsewhere": + let + # Peer wanting to download blocks + downloader = nodes[4] + # Bystander peer - gets block request but can't satisfy them + bystander = nodes[3] + # Holder of actual blocks + blockHolder = nodes[1] + + let aBlock = blocks[0] + (await blockHolder.engine.localStore.putBlock(aBlock)).tryGet() + + await connectNodes(@[downloader, bystander]) + # Downloader asks for block... + let blockRequest = downloader.engine.requestBlock(aBlock.cid) + + # ... and bystander learns that downloader wants it, but can't provide it. + check eventually( + bystander + .engine + .peers + .get(downloader.switch.peerInfo.peerId) + .peerWants + .filterIt( it.address == aBlock.address ) + .len == 1 + ) + + # As soon as we connect the downloader to the blockHolder, the block should + # propagate to the downloader... + await connectNodes(@[downloader, blockHolder]) + check (await blockRequest).tryGet().cid == aBlock.cid + check (await downloader.engine.localStore.hasBlock(aBlock.cid)).tryGet() + + # ... 
and the bystander should have cancelled the want-have + check eventually( + bystander + .engine + .peers + .get(downloader.switch.peerInfo.peerId) + .peerWants + .filterIt( it.address == aBlock.address ) + .len == 0 + ) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index 212dc8c3..5bf02b1b 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -3,41 +3,42 @@ import std/random import std/algorithm import pkg/stew/byteutils -import pkg/asynctest import pkg/chronos -import pkg/libp2p +import pkg/libp2p/errors import pkg/libp2p/routing_record -import pkg/libp2pdht/discv5/protocol as discv5 +import pkg/codexdht/discv5/protocol as discv5 import pkg/codex/rng import pkg/codex/blockexchange import pkg/codex/stores import pkg/codex/chunker import pkg/codex/discovery -import pkg/codex/blocktype as bt +import pkg/codex/blocktype import pkg/codex/utils/asyncheapqueue +import pkg/codex/manifest +import ../../../asynctest import ../../helpers import ../../examples -suite "NetworkStore engine basic": +asyncchecksuite "NetworkStore engine basic": var rng: Rng seckey: PrivateKey - peerId: PeerID + peerId: PeerId chunker: Chunker wallet: WalletRef blockDiscovery: Discovery peerStore: PeerCtxStore pendingBlocks: PendingBlocksManager - blocks: seq[bt.Block] + blocks: seq[Block] done: Future[void] setup: rng = Rng.instance() seckey = PrivateKey.random(rng[]).tryGet() - peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet() - chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) + peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet() + chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb) wallet = WalletRef.example blockDiscovery = Discovery.new() peerStore = PeerCtxStore.new() @@ -48,20 +49,20 @@ suite "NetworkStore engine basic": if chunk.len <= 0: break - blocks.add(bt.Block.new(chunk).tryGet()) + 
blocks.add(Block.new(chunk).tryGet()) done = newFuture[void]() test "Should send want list to new peers": proc sendWantList( - id: PeerID, - cids: seq[Cid], + id: PeerId, + addresses: seq[BlockAddress], priority: int32 = 0, cancel: bool = false, wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false) {.gcsafe, async.} = - check cids.mapIt($it).sorted == blocks.mapIt( $it.cid ).sorted + check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt( $it.cid ).sorted done.complete() let @@ -94,7 +95,7 @@ suite "NetworkStore engine basic": test "Should send account to new peers": let pricing = Pricing.example - proc sendAccount(peer: PeerID, account: Account) {.gcsafe, async.} = + proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} = check account.address == pricing.address done.complete() @@ -125,11 +126,11 @@ suite "NetworkStore engine basic": await done.wait(100.millis) -suite "NetworkStore engine handlers": +asyncchecksuite "NetworkStore engine handlers": var rng: Rng seckey: PrivateKey - peerId: PeerID + peerId: PeerId chunker: Chunker wallet: WalletRef blockDiscovery: Discovery @@ -140,22 +141,26 @@ suite "NetworkStore engine handlers": discovery: DiscoveryEngine peerCtx: BlockExcPeerCtx localStore: BlockStore - done: Future[void] - blocks: seq[bt.Block] + blocks: seq[Block] + + const NopSendWantCancellationsProc = proc( + id: PeerId, + addresses: seq[BlockAddress] + ) {.gcsafe, async.} = discard setup: rng = Rng.instance() - chunker = RandomChunker.new(rng, size = 1024, chunkSize = 256) + chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb) while true: let chunk = await chunker.getBytes() if chunk.len <= 0: break - blocks.add(bt.Block.new(chunk).tryGet()) + blocks.add(Block.new(chunk).tryGet()) seckey = PrivateKey.random(rng[]).tryGet() - peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet() + peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet() wallet = WalletRef.example 
blockDiscovery = Discovery.new() peerStore = PeerCtxStore.new() @@ -194,7 +199,7 @@ suite "NetworkStore engine handlers": let ctx = await engine.taskQueue.pop() check ctx.id == peerId # only `wantBlock` scheduled - check ctx.peerWants.mapIt( it.cid ) == blocks.mapIt( it.cid ) + check ctx.peerWants.mapIt( it.address.cidOrTreeCid ) == blocks.mapIt( it.cid ) let done = handler() await engine.wantListHandler(peerId, wantList) @@ -205,8 +210,8 @@ suite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt( it.cid )) - proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` ) + proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address ) done.complete() engine.network = BlockExcNetwork( @@ -227,8 +232,8 @@ suite "NetworkStore engine handlers": blocks.mapIt( it.cid ), sendDontHave = true) - proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` ) + proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address ) for p in presence: check: p.`type` == BlockPresenceType.DontHave @@ -249,13 +254,9 @@ suite "NetworkStore engine handlers": blocks.mapIt( it.cid ), sendDontHave = true) - proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} = - let - cid1Buf = blocks[0].cid.data.buffer - cid2Buf = blocks[1].cid.data.buffer - + proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = for p in presence: - if p.cid != cid1Buf and p.cid != cid2Buf: + if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid: check p.`type` == BlockPresenceType.DontHave else: check p.`type` 
== BlockPresenceType.Have @@ -278,7 +279,13 @@ suite "NetworkStore engine handlers": engine.pendingBlocks.getWantHandle( it.cid ) ) - await engine.blocksHandler(peerId, blocks) + let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + + # Install NOP for want list cancellations so they don't cause a crash + engine.network = BlockExcNetwork( + request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)) + + await engine.blocksDeliveryHandler(peerId, blocksDelivery) let resolved = await allFinished(pending) check resolved.mapIt( it.read ) == blocks for b in blocks: @@ -293,41 +300,47 @@ suite "NetworkStore engine handlers": peerContext.account = account.some peerContext.blocks = blocks.mapIt( - (it.cid, Presence(cid: it.cid, price: rand(uint16).u256)) + (it.address, Presence(address: it.address, price: rand(uint16).u256)) ).toTable engine.network = BlockExcNetwork( request: BlockExcRequest( - sendPayment: proc(receiver: PeerID, payment: SignedState) {.gcsafe, async.} = + sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = let amount = blocks.mapIt( - peerContext.blocks[it.cid].price + peerContext.blocks[it.address].price ).foldl(a + b) balances = !payment.state.outcome.balances(Asset) check receiver == peerId check balances[account.address.toDestination] == amount - done.complete() + done.complete(), + + # Install NOP for want list cancellations so they don't cause a crash + sendWantCancellations: NopSendWantCancellationsProc )) - await engine.blocksHandler(peerId, blocks) + await engine.blocksDeliveryHandler(peerId, blocks.mapIt( + BlockDelivery(blk: it, address: it.address))) await done.wait(100.millis) test "Should handle block presence": var - handles: Table[Cid, Future[bt.Block]] + handles: Table[Cid, Future[Block]] proc sendWantList( - id: PeerID, - cids: seq[Cid], + id: PeerId, + addresses: seq[BlockAddress], priority: int32 = 0, cancel: bool = false, wantType: WantType = WantType.WantHave, 
full: bool = false, sendDontHave: bool = false) {.gcsafe, async.} = - engine.pendingBlocks.resolve(blocks.filterIt( it.cid in cids )) + engine.pendingBlocks.resolve(blocks + .filterIt( it.address in addresses ) + .mapIt(BlockDelivery(blk: it, address: it.address))) engine.network = BlockExcNetwork( request: BlockExcRequest( @@ -344,20 +357,89 @@ suite "NetworkStore engine handlers": blocks.mapIt( PresenceMessage.init( Presence( - cid: it.cid, + address: it.address, have: true, price: price )))) - for cid in blocks.mapIt(it.cid): - check cid in peerCtx.peerHave - check peerCtx.blocks[cid].price == price + for a in blocks.mapIt(it.address): + check a in peerCtx.peerHave + check peerCtx.blocks[a].price == price -suite "Task Handler": + test "Should send cancellations for received blocks": + let + pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) + blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + cancellations = newTable( + blocks.mapIt((it.address, newFuture[void]())).toSeq + ) + + proc sendWantCancellations( + id: PeerId, + addresses: seq[BlockAddress] + ) {.gcsafe, async.} = + for address in addresses: + cancellations[address].complete() + + engine.network = BlockExcNetwork( + request: BlockExcRequest( + sendWantCancellations: sendWantCancellations + )) + + await engine.blocksDeliveryHandler(peerId, blocksDelivery) + discard await allFinished(pending) + await allFuturesThrowing(cancellations.values().toSeq) + + test "resolveBlocks should queue manifest CIDs for discovery": + engine.network = BlockExcNetwork( + request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)) + + let + manifest = Manifest.new( + treeCid = Cid.example, + blockSize = 123.NBytes, + datasetSize = 234.NBytes + ) + + let manifestBlk = Block.new(data = manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + let blks = @[manifestBlk] + + await engine.resolveBlocks(blks) + + check: + manifestBlk.cid in 
engine.discovery.advertiseQueue + + test "resolveBlocks should queue tree CIDs for discovery": + engine.network = BlockExcNetwork( + request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)) + + let + tCid = Cid.example + delivery = BlockDelivery(blk: Block.example, address: BlockAddress(leaf: true, treeCid: tCid)) + + await engine.resolveBlocks(@[delivery]) + + check: + tCid in engine.discovery.advertiseQueue + + test "resolveBlocks should not queue non-manifest non-tree CIDs for discovery": + engine.network = BlockExcNetwork( + request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)) + + let + blkCid = Cid.example + delivery = BlockDelivery(blk: Block.example, address: BlockAddress(leaf: false, cid: blkCid)) + + await engine.resolveBlocks(@[delivery]) + + check: + blkCid notin engine.discovery.advertiseQueue + +asyncchecksuite "Task Handler": var rng: Rng seckey: PrivateKey - peerId: PeerID + peerId: PeerId chunker: Chunker wallet: WalletRef blockDiscovery: Discovery @@ -366,25 +448,24 @@ suite "Task Handler": network: BlockExcNetwork engine: BlockExcEngine discovery: DiscoveryEngine - peerCtx: BlockExcPeerCtx localStore: BlockStore peersCtx: seq[BlockExcPeerCtx] - peers: seq[PeerID] - blocks: seq[bt.Block] + peers: seq[PeerId] + blocks: seq[Block] setup: rng = Rng.instance() - chunker = RandomChunker.new(rng, size = 1024, chunkSize = 256) + chunker = RandomChunker.new(rng, size = 1024, chunkSize = 256'nb) while true: let chunk = await chunker.getBytes() if chunk.len <= 0: break - blocks.add(bt.Block.new(chunk).tryGet()) + blocks.add(Block.new(chunk).tryGet()) seckey = PrivateKey.random(rng[]).tryGet() - peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet() + peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet() wallet = WalletRef.example blockDiscovery = Discovery.new() peerStore = PeerCtxStore.new() @@ -411,7 +492,7 @@ suite "Task Handler": for i in 0..3: let seckey = PrivateKey.random(rng[]).tryGet() - 
peers.add(PeerID.init(seckey.getPublicKey().tryGet()).tryGet()) + peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet()) peersCtx.add(BlockExcPeerCtx( id: peers[i] @@ -421,22 +502,22 @@ suite "Task Handler": engine.pricing = Pricing.example.some test "Should send want-blocks in priority order": - proc sendBlocks( - id: PeerID, - blks: seq[bt.Block]) {.gcsafe, async.} = - check blks.len == 2 + proc sendBlocksDelivery( + id: PeerId, + blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + check blocksDelivery.len == 2 check: - blks[1].cid == blocks[0].cid - blks[0].cid == blocks[1].cid + blocksDelivery[1].address == blocks[0].address + blocksDelivery[0].address == blocks[1].address for blk in blocks: (await engine.localStore.putBlock(blk)).tryGet() - engine.network.request.sendBlocks = sendBlocks + engine.network.request.sendBlocksDelivery = sendBlocksDelivery # second block to send by priority peersCtx[0].peerWants.add( - Entry( - `block`: blocks[0].cid.data.buffer, + WantListEntry( + address: blocks[0].address, priority: 49, cancel: false, wantType: WantType.WantBlock, @@ -445,8 +526,8 @@ suite "Task Handler": # first block to send by priority peersCtx[0].peerWants.add( - Entry( - `block`: blocks[1].cid.data.buffer, + WantListEntry( + address: blocks[1].address, priority: 50, cancel: false, wantType: WantType.WantBlock, @@ -455,16 +536,49 @@ suite "Task Handler": await engine.taskHandler(peersCtx[0]) + test "Should set in-flight for outgoing blocks": + proc sendBlocksDelivery( + id: PeerId, + blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + check peersCtx[0].peerWants[0].inFlight + + for blk in blocks: + (await engine.localStore.putBlock(blk)).tryGet() + engine.network.request.sendBlocksDelivery = sendBlocksDelivery + + peersCtx[0].peerWants.add(WantListEntry( + address: blocks[0].address, + priority: 50, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + inFlight: false) + ) + await engine.taskHandler(peersCtx[0]) + + 
test "Should clear in-flight when local lookup fails": + peersCtx[0].peerWants.add(WantListEntry( + address: blocks[0].address, + priority: 50, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + inFlight: false) + ) + await engine.taskHandler(peersCtx[0]) + + check not peersCtx[0].peerWants[0].inFlight + test "Should send presence": let present = blocks - let missing = @[bt.Block.new("missing".toBytes).tryGet()] + let missing = @[Block.new("missing".toBytes).tryGet()] let price = (!engine.pricing).price - proc sendPresence(id: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = check presence.mapIt(!Presence.init(it)) == @[ - Presence(cid: present[0].cid, have: true, price: price), - Presence(cid: present[1].cid, have: true, price: price), - Presence(cid: missing[0].cid, have: false) + Presence(address: present[0].address, have: true, price: price), + Presence(address: present[1].address, have: true, price: price), + Presence(address: missing[0].address, have: false) ] for blk in blocks: @@ -473,8 +587,8 @@ suite "Task Handler": # have block peersCtx[0].peerWants.add( - Entry( - `block`: present[0].cid.data.buffer, + WantListEntry( + address: present[0].address, priority: 1, cancel: false, wantType: WantType.WantHave, @@ -483,8 +597,8 @@ suite "Task Handler": # have block peersCtx[0].peerWants.add( - Entry( - `block`: present[1].cid.data.buffer, + WantListEntry( + address: present[1].address, priority: 1, cancel: false, wantType: WantType.WantHave, @@ -493,8 +607,8 @@ suite "Task Handler": # don't have block peersCtx[0].peerWants.add( - Entry( - `block`: missing[0].cid.data.buffer, + WantListEntry( + address: missing[0].address, priority: 1, cancel: false, wantType: WantType.WantHave, diff --git a/tests/codex/blockexchange/engine/testpayments.nim b/tests/codex/blockexchange/engine/testpayments.nim index b50a1720..03c08e09 100644 --- 
a/tests/codex/blockexchange/engine/testpayments.nim +++ b/tests/codex/blockexchange/engine/testpayments.nim @@ -2,8 +2,9 @@ import std/unittest import pkg/codex/stores import ../../examples +import ../../helpers -suite "engine payments": +checksuite "engine payments": let address = EthAddress.example let amount = 42.u256 diff --git a/tests/codex/blockexchange/protobuf/testpayments.nim b/tests/codex/blockexchange/protobuf/testpayments.nim index c9d6f9c6..81bc5dfc 100644 --- a/tests/codex/blockexchange/protobuf/testpayments.nim +++ b/tests/codex/blockexchange/protobuf/testpayments.nim @@ -1,10 +1,13 @@ -import pkg/asynctest import pkg/chronos import pkg/stew/byteutils -import ../../examples import pkg/codex/stores -suite "account protobuf messages": +import ../../../asynctest +import ../../examples +import ../../helpers + + +checksuite "account protobuf messages": let account = Account(address: EthAddress.example) let message = AccountMessage.init(account) @@ -20,7 +23,7 @@ suite "account protobuf messages": incorrect.address.del(0) check Account.init(incorrect).isNone -suite "channel update messages": +checksuite "channel update messages": let state = SignedState.example let update = StateChannelUpdate.init(state) diff --git a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 7d02269e..963dd0ec 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -1,19 +1,19 @@ -import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/codex/blockexchange/protobuf/presence + +import ../../../asynctest import ../../examples +import ../../helpers -suite "block presence protobuf messages": +checksuite "block presence protobuf messages": - let cid = Cid.example - let price = UInt256.example - let presence = Presence(cid: cid, have: true, price: price) - let message = PresenceMessage.init(presence) - - test "encodes CID": - check message.cid 
== cid.data.buffer + let + cid = Cid.example + address = BlockAddress(leaf: false, cid: cid) + price = UInt256.example + presence = Presence(address: address, have: true, price: price) + message = PresenceMessage.init(presence) test "encodes have/donthave": var presence = presence @@ -26,12 +26,7 @@ suite "block presence protobuf messages": check message.price == @(price.toBytesBE) test "decodes CID": - check Presence.init(message).?cid == cid.some - - test "fails to decode when CID is invalid": - var incorrect = message - incorrect.cid.del(0) - check Presence.init(incorrect).isNone + check Presence.init(message).?address == address.some test "decodes have/donthave": var message = message diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 9f3c399e..756d86a1 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -1,24 +1,22 @@ import std/sequtils import std/tables -import pkg/asynctest import pkg/chronos -import pkg/libp2p -import pkg/libp2p/errors import pkg/codex/rng import pkg/codex/chunker import pkg/codex/blocktype as bt import pkg/codex/blockexchange -import ../helpers +import ../../asynctest import ../examples +import ../helpers -suite "Network - Handlers": +asyncchecksuite "Network - Handlers": let rng = Rng.instance() seckey = PrivateKey.random(rng[]).tryGet() - peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet() + peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet() chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) var @@ -49,13 +47,13 @@ suite "Network - Handlers": discard await networkPeer.connect() test "Want List handler": - proc wantListHandler(peer: PeerID, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = # check that we got the correct amount of entries check wantList.entries.len == 4 for b in blocks: - check b.cid in wantList.entries - 
let entry = wantList.entries[wantList.entries.find(b.cid)] + check b.address in wantList.entries + let entry = wantList.entries[wantList.entries.find(b.address)] check entry.wantType == WantType.WantHave check entry.priority == 1 check entry.cancel == true @@ -71,29 +69,29 @@ suite "Network - Handlers": true, true) let msg = Message(wantlist: wantList) - await buffer.pushData(lenPrefix(ProtobufEncode(msg))) + await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) test "Blocks Handler": - proc blocksHandler(peer: PeerID, blks: seq[bt.Block]) {.gcsafe, async.} = - check blks == blocks + proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + check blocks == blocksDelivery.mapIt(it.blk) done.complete() - network.handlers.onBlocks = blocksHandler + network.handlers.onBlocksDelivery = blocksDeliveryHandler - let msg = Message(payload: makeBlocks(blocks)) - await buffer.pushData(lenPrefix(ProtobufEncode(msg))) + let msg = Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) test "Presence Handler": proc presenceHandler( - peer: PeerID, - precense: seq[BlockPresence]) {.gcsafe, async.} = + peer: PeerId, + presence: seq[BlockPresence]) {.gcsafe, async.} = for b in blocks: check: - b.cid in precense + b.address in presence done.complete() @@ -102,42 +100,42 @@ suite "Network - Handlers": let msg = Message( blockPresences: blocks.mapIt( BlockPresence( - cid: it.cid.data.buffer, + address: it.address, type: BlockPresenceType.Have ))) - await buffer.pushData(lenPrefix(ProtobufEncode(msg))) + await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) test "Handles account messages": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerID, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = 
check received == account done.complete() network.handlers.onAccount = handleAccount let message = Message(account: AccountMessage.init(account)) - await buffer.pushData(lenPrefix(ProtobufEncode(message))) + await buffer.pushData(lenPrefix(protobufEncode(message))) await done.wait(100.millis) test "Handles payment messages": let payment = SignedState.example - proc handlePayment(peer: PeerID, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = check received == payment done.complete() network.handlers.onPayment = handlePayment let message = Message(payment: StateChannelUpdate.init(payment)) - await buffer.pushData(lenPrefix(ProtobufEncode(message))) + await buffer.pushData(lenPrefix(protobufEncode(message))) await done.wait(100.millis) -suite "Network - Senders": +asyncchecksuite "Network - Senders": let chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) @@ -179,13 +177,13 @@ suite "Network - Senders": switch2.stop()) test "Send want list": - proc wantListHandler(peer: PeerID, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = # check that we got the correct amount of entries check wantList.entries.len == 4 for b in blocks: - check b.cid in wantList.entries - let entry = wantList.entries[wantList.entries.find(b.cid)] + check b.address in wantList.entries + let entry = wantList.entries[wantList.entries.find(b.address)] check entry.wantType == WantType.WantHave check entry.priority == 1 check entry.cancel == true @@ -196,31 +194,31 @@ suite "Network - Senders": network2.handlers.onWantList = wantListHandler await network1.sendWantList( switch2.peerInfo.peerId, - blocks.mapIt( it.cid ), + blocks.mapIt( it.address ), 1, true, WantType.WantHave, true, true) await done.wait(500.millis) test "send blocks": - proc blocksHandler(peer: PeerID, blks: seq[bt.Block]) {.gcsafe, async.} = - check blks == blocks + proc 
blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + check blocks == blocksDelivery.mapIt(it.blk) done.complete() - network2.handlers.onBlocks = blocksHandler - await network1.sendBlocks( + network2.handlers.onBlocksDelivery = blocksDeliveryHandler + await network1.sendBlocksDelivery( switch2.peerInfo.peerId, - blocks) + blocks.mapIt(BlockDelivery(blk: it, address: it.address))) await done.wait(500.millis) test "send presence": proc presenceHandler( - peer: PeerID, + peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} = for b in blocks: check: - b.cid in precense + b.address in precense done.complete() @@ -230,7 +228,7 @@ suite "Network - Senders": switch2.peerInfo.peerId, blocks.mapIt( BlockPresence( - cid: it.cid.data.buffer, + address: it.address, type: BlockPresenceType.Have ))) @@ -239,7 +237,7 @@ suite "Network - Senders": test "send account": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerID, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = check received == account done.complete() @@ -251,7 +249,7 @@ suite "Network - Senders": test "send payment": let payment = SignedState.example - proc handlePayment(peer: PeerID, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = check received == payment done.complete() @@ -260,11 +258,10 @@ suite "Network - Senders": await network1.sendPayment(switch2.peerInfo.peerId, payment) await done.wait(500.millis) -suite "Network - Test Limits": +asyncchecksuite "Network - Test Limits": var switch1, switch2: Switch network1, network2: BlockExcNetwork - blocks: seq[bt.Block] done: Future[void] setup: @@ -296,7 +293,7 @@ suite "Network - Test Limits": test "Concurrent Sends": let account = Account(address: EthAddress.example) network2.handlers.onAccount = - proc(peer: PeerID, received: Account) {.gcsafe, async.} = + 
proc(peer: PeerId, received: Account) {.gcsafe, async.} = check false let fut = network1.send( diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim index 1cf66a09..6d7a1af3 100644 --- a/tests/codex/blockexchange/testpeerctxstore.nim +++ b/tests/codex/blockexchange/testpeerctxstore.nim @@ -1,16 +1,17 @@ import std/sugar import std/sequtils +import std/unittest -import pkg/unittest2 import pkg/libp2p import pkg/codex/blockexchange/peers import pkg/codex/blockexchange/protobuf/blockexc import pkg/codex/blockexchange/protobuf/presence +import ../helpers import ../examples -suite "Peer Context Store": +checksuite "Peer Context Store": var store: PeerCtxStore peerCtx: BlockExcPeerCtx @@ -30,16 +31,16 @@ suite "Peer Context Store": test "Should get peer": check store.get(peerCtx.id) == peerCtx -suite "Peer Context Store Peer Selection": +checksuite "Peer Context Store Peer Selection": var store: PeerCtxStore peerCtxs: seq[BlockExcPeerCtx] - cids: seq[Cid] + addresses: seq[BlockAddress] setup: store = PeerCtxStore.new() - cids = collect(newSeq): - for i in 0..<10: Cid.example + addresses = collect(newSeq): + for i in 0..<10: BlockAddress(leaf: false, cid: Cid.example) peerCtxs = collect(newSeq): for i in 0..<10: BlockExcPeerCtx.example @@ -49,20 +50,20 @@ suite "Peer Context Store Peer Selection": teardown: store = nil - cids = @[] + addresses = @[] peerCtxs = @[] test "Should select peers that have Cid": peerCtxs[0].blocks = collect(initTable): - for i, c in cids: - { c: Presence(cid: c, price: i.u256) } + for i, a in addresses: + { a: Presence(address: a, price: i.u256) } peerCtxs[5].blocks = collect(initTable): - for i, c in cids: - { c: Presence(cid: c, price: i.u256) } + for i, a in addresses: + { a: Presence(address: a, price: i.u256) } let - peers = store.peersHave(cids[0]) + peers = store.peersHave(addresses[0]) check peers.len == 2 check peerCtxs[0] in peers @@ -70,19 +71,19 @@ suite "Peer Context Store Peer 
Selection": test "Should select cheapest peers for Cid": peerCtxs[0].blocks = collect(initTable): - for i, c in cids: - { c: Presence(cid: c, price: (5 + i).u256) } + for i, a in addresses: + { a: Presence(address: a, price: (5 + i).u256) } peerCtxs[5].blocks = collect(initTable): - for i, c in cids: - { c: Presence(cid: c, price: (2 + i).u256) } + for i, a in addresses: + { a: Presence(address: a, price: (2 + i).u256) } peerCtxs[9].blocks = collect(initTable): - for i, c in cids: - { c: Presence(cid: c, price: i.u256) } + for i, a in addresses: + { a: Presence(address: a, price: i.u256) } let - peers = store.selectCheapest(cids[0]) + peers = store.selectCheapest(addresses[0]) check peers.len == 3 check peers[0] == peerCtxs[9] @@ -91,9 +92,9 @@ suite "Peer Context Store Peer Selection": test "Should select peers that want Cid": let - entries = cids.mapIt( - Entry( - `block`: it.data.buffer, + entries = addresses.mapIt( + WantListEntry( + address: it, priority: 1, cancel: false, wantType: WantType.WantBlock, @@ -103,7 +104,7 @@ suite "Peer Context Store Peer Selection": peerCtxs[5].peerWants = entries let - peers = store.peersWant(cids[4]) + peers = store.peersWant(addresses[4]) check peers.len == 2 check peerCtxs[0] in peers diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index d713cdee..dd94c4da 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -2,23 +2,23 @@ import std/sequtils import std/algorithm import pkg/chronos -import pkg/asynctest -import pkg/libp2p import pkg/stew/byteutils import pkg/codex/blocktype as bt import pkg/codex/blockexchange -import ../examples +import ../helpers +import ../../asynctest -suite "Pending Blocks": +checksuite "Pending Blocks": test "Should add want handle": let pendingBlocks = PendingBlocksManager.new() blk = bt.Block.new("Hello".toBytes).tryGet - handle = pendingBlocks.getWantHandle(blk.cid) - check 
pendingBlocks.pending(blk.cid) + discard pendingBlocks.getWantHandle(blk.cid) + + check blk.cid in pendingBlocks test "Should resolve want handle": let @@ -27,7 +27,7 @@ suite "Pending Blocks": handle = pendingBlocks.getWantHandle(blk.cid) check blk.cid in pendingBlocks - pendingBlocks.resolve(@[blk]) + pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address))) check (await handle) == blk check blk.cid notin pendingBlocks @@ -59,11 +59,12 @@ suite "Pending Blocks": let pendingBlocks = PendingBlocksManager.new() blks = (0..9).mapIt( bt.Block.new(("Hello " & $it).toBytes).tryGet ) - handles = blks.mapIt( pendingBlocks.getWantHandle( it.cid ) ) + + discard blks.mapIt( pendingBlocks.getWantHandle( it.cid ) ) check: blks.mapIt( $it.cid ).sorted(cmp[string]) == - toSeq(pendingBlocks.wantList).mapIt( $it ).sorted(cmp[string]) + toSeq(pendingBlocks.wantListBlockCids).mapIt( $it ).sorted(cmp[string]) test "Should get want handles list": let @@ -73,7 +74,7 @@ suite "Pending Blocks": wantHandles = toSeq(pendingBlocks.wantHandles) check wantHandles.len == handles.len - pendingBlocks.resolve(blks) + pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address))) check: (await allFinished(wantHandles)).mapIt( $it.read.cid ).sorted(cmp[string]) == diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 8f75b02b..2e68d236 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -7,6 +7,7 @@ import pkg/codex/rng import pkg/codex/stores import pkg/codex/blocktype as bt import pkg/codex/sales +import pkg/codex/merkletree import ../examples export examples @@ -43,19 +44,41 @@ proc example*(_: type bt.Block): bt.Block = let bytes = newSeqWith(length, rand(uint8)) bt.Block.new(bytes).tryGet() -proc example*(_: type PeerId): PeerID = +proc example*(_: type PeerId): PeerId = let key = PrivateKey.random(Rng.instance[]).get PeerId.init(key.getPublicKey().get).get proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx = - 
BlockExcPeerCtx(id: PeerID.example) + BlockExcPeerCtx(id: PeerId.example) proc example*(_: type Cid): Cid = bt.Block.example.cid +proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = + let bytes = newSeqWith(256, rand(uint8)) + MultiHash.digest($mcodec, bytes).tryGet() + proc example*(_: type Availability): Availability = Availability.init( - size = uint16.example.u256, + totalSize = uint16.example.u256, + freeSize = uint16.example.u256, duration = uint16.example.u256, - minPrice = uint64.example.u256 + minPrice = uint64.example.u256, + maxCollateral = uint16.example.u256 ) + +proc example*(_: type Reservation): Reservation = + Reservation.init( + availabilityId = AvailabilityId(array[32, byte].example), + size = uint16.example.u256, + slotId = SlotId.example + ) + +proc example*(_: type MerkleProof): MerkleProof = + MerkleProof.init(3, @[MultiHash.example]).tryget() + +proc example*(_: type Poseidon2Proof): Poseidon2Proof = + var example = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]() + example.index = 123 + example.path = @[1, 2, 3, 4].mapIt( it.toF ) + example diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 6d431870..89aeafd1 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ -1,26 +1,40 @@ -import std/options +import std/sequtils import pkg/chronos import pkg/libp2p import pkg/libp2p/varint -import pkg/codex/blocktype as bt +import pkg/codex/blocktype import pkg/codex/stores import pkg/codex/manifest +import pkg/codex/merkletree +import pkg/codex/blockexchange import pkg/codex/rng +import pkg/codex/utils import ./helpers/nodeutils import ./helpers/randomchunker +import ./helpers/mockchunker import ./helpers/mockdiscovery -import ./helpers/eventually +import ./helpers/always +import ../checktest -export randomchunker, nodeutils, mockdiscovery, eventually +export randomchunker, nodeutils, mockdiscovery, mockchunker, always, checktest, manifest + +export libp2p except setup, eventually # NOTE: The meaning 
of equality for blocks # is changed here, because blocks are now `ref` # types. This is only in tests!!! -func `==`*(a, b: bt.Block): bool = +func `==`*(a, b: Block): bool = (a.cid == b.cid) and (a.data == b.data) +proc calcEcBlocksCount*(blocksCount: int, ecK, ecM: int): int = + let + rounded = roundUp(blocksCount, ecK) + steps = divUp(rounded, ecK) + + rounded + (steps * ecM) + proc lenPrefix*(msg: openArray[byte]): seq[byte] = ## Write `msg` with a varint-encoded length prefix ## @@ -32,21 +46,94 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] = return buf +proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, CodexTree) = + + if blocks.len == 0: + return failure("Blocks list was empty") + + let + datasetSize = blocks.mapIt(it.data.len).foldl(a + b) + blockSize = blocks.mapIt(it.data.len).foldl(max(a, b)) + tree = ? CodexTree.init(blocks.mapIt(it.cid)) + treeCid = ? tree.rootCid + manifest = Manifest.new( + treeCid = treeCid, + blockSize = NBytes(blockSize), + datasetSize = NBytes(datasetSize)) + + return success((manifest, tree)) + +proc makeWantList*( + cids: seq[Cid], + priority: int = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false + ): WantList = + WantList( + entries: cids.mapIt( + WantListEntry( + address: BlockAddress(leaf: false, cid: it), + priority: priority.int32, + cancel: cancel, + wantType: wantType, + sendDontHave: sendDontHave) ), + full: full) + +proc storeDataGetManifest*(store: BlockStore, chunker: Chunker): Future[Manifest] {.async.} = + var cids = newSeq[Cid]() + + while ( + let chunk = await chunker.getBytes(); + chunk.len > 0): + + let blk = Block.new(chunk).tryGet() + cids.add(blk.cid) + (await store.putBlock(blk)).tryGet() + + let + tree = CodexTree.init(cids).tryGet() + treeCid = tree.rootCid.tryGet() + manifest = Manifest.new( + treeCid = treeCid, + blockSize = NBytes(chunker.chunkSize), + datasetSize = NBytes(chunker.offset)) + + for i in 0..= 0: 
continue pos.add(i) var - blk = (await store.getBlock(manifest[i])).tryGet() + blk = (await store.getBlock(manifest.treeCid, i)).tryGet() bytePos: seq[int] doAssert bytes < blk.data.len diff --git a/tests/codex/helpers/eventually.nim b/tests/codex/helpers/always.nim similarity index 73% rename from tests/codex/helpers/eventually.nim rename to tests/codex/helpers/always.nim index bbeef3be..74340b74 100644 --- a/tests/codex/helpers/eventually.nim +++ b/tests/codex/helpers/always.nim @@ -1,13 +1,13 @@ import pkg/chronos -template eventually*(condition: untyped, timeout = 5.seconds): bool = +template always*(condition: untyped, timeout = 50.millis): bool = proc loop: Future[bool] {.async.} = let start = Moment.now() while true: - if condition: - return true - if Moment.now() > (start + timeout): + if not condition: return false + if Moment.now() > (start + timeout): + return true else: await sleepAsync(1.millis) await loop() diff --git a/tests/codex/helpers/mockchunker.nim b/tests/codex/helpers/mockchunker.nim new file mode 100644 index 00000000..acbe7ab6 --- /dev/null +++ b/tests/codex/helpers/mockchunker.nim @@ -0,0 +1,45 @@ +import std/sequtils + +import pkg/chronos + +import pkg/codex/chunker +import pkg/codex/rng + +export chunker + +type + MockChunker* = Chunker + +proc new*( + T: type MockChunker, + dataset: openArray[byte], + chunkSize: int | NBytes, + pad: bool = false +): MockChunker = + ## Create a chunker that produces data + ## + + let + chunkSize = chunkSize.NBytes + dataset = @dataset + + var consumed = 0 + proc reader(data: ChunkBuffer, len: int): Future[int] {.async, gcsafe, raises: [Defect].} = + + if consumed >= dataset.len: + return 0 + + var read = 0 + while read < len and + read < chunkSize.int and + (consumed + read) < dataset.len: + data[read] = dataset[consumed + read] + read.inc + + consumed += read + return read + + Chunker.new( + reader = reader, + pad = pad, + chunkSize = chunkSize) diff --git a/tests/codex/helpers/mockclock.nim 
b/tests/codex/helpers/mockclock.nim index 55283f8a..ada449f9 100644 --- a/tests/codex/helpers/mockclock.nim +++ b/tests/codex/helpers/mockclock.nim @@ -1,4 +1,5 @@ import std/times +import pkg/chronos import codex/clock export clock @@ -6,16 +7,36 @@ export clock type MockClock* = ref object of Clock time: SecondsSince1970 + waiting: seq[Waiting] + Waiting = ref object + until: SecondsSince1970 + future: Future[void] -func new*(_: type MockClock, - time: SecondsSince1970 = getTime().toUnix): MockClock = +func new*( + _: type MockClock, + time: SecondsSince1970 = getTime().toUnix +): MockClock = + ## Create a mock clock instance MockClock(time: time) -func set*(clock: MockClock, time: SecondsSince1970) = +proc set*(clock: MockClock, time: SecondsSince1970) = clock.time = time + var index = 0 + while index < clock.waiting.len: + if clock.waiting[index].until <= clock.time: + clock.waiting[index].future.complete() + clock.waiting.del(index) + else: + inc index -func advance*(clock: MockClock, seconds: int64) = - clock.time += seconds +proc advance*(clock: MockClock, seconds: int64) = + clock.set(clock.time + seconds) method now*(clock: MockClock): SecondsSince1970 = clock.time + +method waitUntil*(clock: MockClock, time: SecondsSince1970) {.async.} = + if time > clock.now(): + let future = newFuture[void]() + clock.waiting.add(Waiting(until: time, future: future)) + await future diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim index 86e517a6..5d5e8132 100644 --- a/tests/codex/helpers/mockdiscovery.nim +++ b/tests/codex/helpers/mockdiscovery.nim @@ -10,8 +10,6 @@ import pkg/chronos import pkg/libp2p import pkg/questionable -import pkg/questionable/results -import pkg/stew/shims/net import pkg/codex/discovery import pkg/contractabi/address as ca @@ -26,17 +24,21 @@ type publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.} -proc new*(T: type MockDiscovery): T = - T() +proc new*(T: type 
MockDiscovery): MockDiscovery = + MockDiscovery() proc findPeer*( - d: Discovery, - peerId: PeerID): Future[?PeerRecord] {.async.} = + d: Discovery, + peerId: PeerId +): Future[?PeerRecord] {.async.} = + ## mock find a peer - always return none + ## return none(PeerRecord) method find*( - d: MockDiscovery, - cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = + d: MockDiscovery, + cid: Cid +): Future[seq[SignedPeerRecord]] {.async.} = if isNil(d.findBlockProvidersHandler): return @@ -49,8 +51,9 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = await d.publishBlockProvideHandler(d, cid) method find*( - d: MockDiscovery, - host: ca.Address): Future[seq[SignedPeerRecord]] {.async.} = + d: MockDiscovery, + host: ca.Address +): Future[seq[SignedPeerRecord]] {.async.} = if isNil(d.findHostProvidersHandler): return diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index b8a11f6f..30697f51 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -1,57 +1,86 @@ import std/sequtils import std/tables import std/hashes +import std/sets +import std/sugar +import pkg/questionable import pkg/codex/market +import pkg/codex/contracts/requests +import pkg/codex/contracts/proofs +import pkg/codex/contracts/config +import ../examples export market export tables type MockMarket* = ref object of Market + periodicity: Periodicity activeRequests*: Table[Address, seq[RequestId]] + activeSlots*: Table[Address, seq[SlotId]] requested*: seq[StorageRequest] requestEnds*: Table[RequestId, SecondsSince1970] - state*: Table[RequestId, RequestState] + requestExpiry*: Table[RequestId, SecondsSince1970] + requestState*: Table[RequestId, RequestState] + slotState*: Table[SlotId, SlotState] fulfilled*: seq[Fulfillment] - filled*: seq[Slot] + filled*: seq[MockSlot] + freed*: seq[SlotId] + submitted*: seq[Groth16Proof] + markedAsMissingProofs*: seq[SlotId] + canBeMarkedAsMissing: HashSet[SlotId] withdrawn*: 
seq[RequestId] + proofPointer*: uint8 + proofsRequired: HashSet[SlotId] + proofsToBeRequired: HashSet[SlotId] + proofChallenge*: ProofChallenge + proofEnds: Table[SlotId, UInt256] signer: Address subscriptions: Subscriptions + config*: MarketplaceConfig Fulfillment* = object requestId*: RequestId - proof*: seq[byte] + proof*: Groth16Proof host*: Address - Slot* = object + MockSlot* = object requestId*: RequestId - slotIndex*: UInt256 - proof*: seq[byte] host*: Address + slotIndex*: UInt256 + proof*: Groth16Proof Subscriptions = object onRequest: seq[RequestSubscription] onFulfillment: seq[FulfillmentSubscription] onSlotFilled: seq[SlotFilledSubscription] + onSlotFreed: seq[SlotFreedSubscription] onRequestCancelled: seq[RequestCancelledSubscription] onRequestFailed: seq[RequestFailedSubscription] + onProofSubmitted: seq[ProofSubmittedSubscription] RequestSubscription* = ref object of Subscription market: MockMarket callback: OnRequest FulfillmentSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId + requestId: ?RequestId callback: OnFulfillment SlotFilledSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId - slotIndex: UInt256 + requestId: ?RequestId + slotIndex: ?UInt256 callback: OnSlotFilled + SlotFreedSubscription* = ref object of Subscription + market: MockMarket + callback: OnSlotFreed RequestCancelledSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId + requestId: ?RequestId callback: OnRequestCancelled RequestFailedSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId + requestId: ?RequestId callback: OnRequestCancelled + ProofSubmittedSubscription = ref object of Subscription + market: MockMarket + callback: OnProofSubmitted proc hash*(address: Address): Hash = hash(address.toArray) @@ -60,20 +89,53 @@ proc hash*(requestId: RequestId): Hash = hash(requestId.toArray) proc new*(_: type MockMarket): MockMarket = - MockMarket(signer: 
Address.example) + ## Create a new mocked Market instance + ## + let config = MarketplaceConfig( + collateral: CollateralConfig( + repairRewardPercentage: 10, + maxNumberOfSlashes: 5, + slashCriterion: 3, + slashPercentage: 10 + ), + proofs: ProofConfig( + period: 10.u256, + timeout: 5.u256, + downtime: 64.uint8, + downtimeProduct: 67.uint8 + ) + ) + MockMarket(signer: Address.example, config: config) method getSigner*(market: MockMarket): Future[Address] {.async.} = return market.signer +method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = + return Periodicity(seconds: mock.config.proofs.period) + +method proofTimeout*(market: MockMarket): Future[UInt256] {.async.} = + return market.config.proofs.timeout + +method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = + return market.config.proofs.downtime + +method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} = + return market.proofPointer + method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} = market.requested.add(request) var subscriptions = market.subscriptions.onRequest for subscription in subscriptions: - subscription.callback(request.id, request.ask) + subscription.callback(request.id, + request.ask, + request.expiry) method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} = return market.activeRequests[market.signer] +method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} = + return market.activeSlots[market.signer] + method getRequest(market: MockMarket, id: RequestId): Future[?StorageRequest] {.async.} = for request in market.requested: @@ -81,15 +143,35 @@ method getRequest(market: MockMarket, return some request return none StorageRequest -method getState*(market: MockMarket, - requestId: RequestId): Future[?RequestState] {.async.} = - return market.state.?[requestId] +method getActiveSlot*( + market: MockMarket, + slotId: SlotId): Future[?Slot] {.async.} = + + for slot in market.filled: + if 
slotId(slot.requestId, slot.slotIndex) == slotId and + request =? await market.getRequest(slot.requestId): + return some Slot(request: request, slotIndex: slot.slotIndex) + return none Slot + +method requestState*(market: MockMarket, + requestId: RequestId): Future[?RequestState] {.async.} = + return market.requestState.?[requestId] + +method slotState*(market: MockMarket, + slotId: SlotId): Future[SlotState] {.async.} = + if not market.slotState.hasKey(slotId): + return SlotState.Free + return market.slotState[slotId] method getRequestEnd*(market: MockMarket, id: RequestId): Future[SecondsSince1970] {.async.} = return market.requestEnds[id] -method getHost(market: MockMarket, +method requestExpiresAt*(market: MockMarket, + id: RequestId): Future[SecondsSince1970] {.async.} = + return market.requestExpiry[id] + +method getHost*(market: MockMarket, requestId: RequestId, slotIndex: UInt256): Future[?Address] {.async.} = for slot in market.filled: @@ -102,54 +184,125 @@ proc emitSlotFilled*(market: MockMarket, slotIndex: UInt256) = var subscriptions = market.subscriptions.onSlotFilled for subscription in subscriptions: - if subscription.requestId == requestId and - subscription.slotIndex == slotIndex: + let requestMatches = + subscription.requestId.isNone or + subscription.requestId == some requestId + let slotMatches = + subscription.slotIndex.isNone or + subscription.slotIndex == some slotIndex + if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitRequestCancelled*(market: MockMarket, - requestId: RequestId) = +proc emitSlotFreed*(market: MockMarket, + requestId: RequestId, + slotIndex: UInt256) = + var subscriptions = market.subscriptions.onSlotFreed + for subscription in subscriptions: + subscription.callback(requestId, slotIndex) + +proc emitRequestCancelled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestCancelled for subscription in subscriptions: - if subscription.requestId == 
requestId: + if subscription.requestId == requestId.some or + subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFulfilled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onFulfillment for subscription in subscriptions: - if subscription.requestId == requestId: + if subscription.requestId == requestId.some or + subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFailed*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestFailed for subscription in subscriptions: - if subscription.requestId == requestId: + if subscription.requestId == requestId.some or + subscription.requestId.isNone: subscription.callback(requestId) proc fillSlot*(market: MockMarket, requestId: RequestId, slotIndex: UInt256, - proof: seq[byte], + proof: Groth16Proof, host: Address) = - let slot = Slot( + let slot = MockSlot( requestId: requestId, slotIndex: slotIndex, proof: proof, host: host ) market.filled.add(slot) + market.slotState[slotId(slot.requestId, slot.slotIndex)] = SlotState.Filled market.emitSlotFilled(requestId, slotIndex) method fillSlot*(market: MockMarket, requestId: RequestId, slotIndex: UInt256, - proof: seq[byte]) {.async.} = + proof: Groth16Proof, + collateral: UInt256) {.async.} = market.fillSlot(requestId, slotIndex, proof, market.signer) +method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = + market.freed.add(slotId) + for s in market.filled: + if slotId(s.requestId, s.slotIndex) == slotId: + market.emitSlotFreed(s.requestId, s.slotIndex) + break + market.slotState[slotId] = SlotState.Free + method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} = market.withdrawn.add(requestId) market.emitRequestCancelled(requestId) +proc setProofRequired*(mock: MockMarket, id: SlotId, required: bool) = + if required: + mock.proofsRequired.incl(id) + else: + mock.proofsRequired.excl(id) + +method isProofRequired*(mock: 
MockMarket, + id: SlotId): Future[bool] {.async.} = + return mock.proofsRequired.contains(id) + +proc setProofToBeRequired*(mock: MockMarket, id: SlotId, required: bool) = + if required: + mock.proofsToBeRequired.incl(id) + else: + mock.proofsToBeRequired.excl(id) + +method willProofBeRequired*(mock: MockMarket, + id: SlotId): Future[bool] {.async.} = + return mock.proofsToBeRequired.contains(id) + +method getChallenge*(mock: MockMarket, id: SlotId): Future[ProofChallenge] {.async.} = + return mock.proofChallenge + +proc setProofEnd*(mock: MockMarket, id: SlotId, proofEnd: UInt256) = + mock.proofEnds[id] = proofEnd + +method submitProof*(mock: MockMarket, id: SlotId, proof: Groth16Proof) {.async.} = + mock.submitted.add(proof) + for subscription in mock.subscriptions.onProofSubmitted: + subscription.callback(id) + +method markProofAsMissing*(market: MockMarket, + id: SlotId, + period: Period) {.async.} = + market.markedAsMissingProofs.add(id) + +proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) = + if required: + mock.canBeMarkedAsMissing.incl(id) + else: + mock.canBeMarkedAsMissing.excl(id) + +method canProofBeMarkedAsMissing*(market: MockMarket, + id: SlotId, + period: Period): Future[bool] {.async.} = + return market.canBeMarkedAsMissing.contains(id) + method subscribeRequests*(market: MockMarket, callback: OnRequest): Future[Subscription] {.async.} = @@ -160,18 +313,36 @@ method subscribeRequests*(market: MockMarket, market.subscriptions.onRequest.add(subscription) return subscription +method subscribeFulfillment*(market: MockMarket, + callback: OnFulfillment): + Future[Subscription] {.async.} = + let subscription = FulfillmentSubscription( + market: market, + requestId: none RequestId, + callback: callback + ) + market.subscriptions.onFulfillment.add(subscription) + return subscription + method subscribeFulfillment*(market: MockMarket, requestId: RequestId, callback: OnFulfillment): Future[Subscription] {.async.} = let 
subscription = FulfillmentSubscription( market: market, - requestId: requestId, + requestId: some requestId, callback: callback ) market.subscriptions.onFulfillment.add(subscription) return subscription +method subscribeSlotFilled*(market: MockMarket, + callback: OnSlotFilled): + Future[Subscription] {.async.} = + let subscription = SlotFilledSubscription(market: market, callback: callback) + market.subscriptions.onSlotFilled.add(subscription) + return subscription + method subscribeSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt256, @@ -179,37 +350,87 @@ method subscribeSlotFilled*(market: MockMarket, Future[Subscription] {.async.} = let subscription = SlotFilledSubscription( market: market, - requestId: requestId, - slotIndex: slotIndex, + requestId: some requestId, + slotIndex: some slotIndex, callback: callback ) market.subscriptions.onSlotFilled.add(subscription) return subscription +method subscribeSlotFreed*(market: MockMarket, + callback: OnSlotFreed): + Future[Subscription] {.async.} = + let subscription = SlotFreedSubscription(market: market, callback: callback) + market.subscriptions.onSlotFreed.add(subscription) + return subscription + method subscribeRequestCancelled*(market: MockMarket, - requestId: RequestId, - callback: OnRequestCancelled): - Future[Subscription] {.async.} = + callback: OnRequestCancelled): + Future[Subscription] {.async.} = let subscription = RequestCancelledSubscription( market: market, - requestId: requestId, + requestId: none RequestId, callback: callback ) market.subscriptions.onRequestCancelled.add(subscription) return subscription +method subscribeRequestCancelled*(market: MockMarket, + requestId: RequestId, + callback: OnRequestCancelled): + Future[Subscription] {.async.} = + let subscription = RequestCancelledSubscription( + market: market, + requestId: some requestId, + callback: callback + ) + market.subscriptions.onRequestCancelled.add(subscription) + return subscription + +method 
subscribeRequestFailed*(market: MockMarket, + callback: OnRequestFailed): + Future[Subscription] {.async.} = + let subscription = RequestFailedSubscription( + market: market, + requestId: none RequestId, + callback: callback + ) + market.subscriptions.onRequestFailed.add(subscription) + return subscription + method subscribeRequestFailed*(market: MockMarket, requestId: RequestId, callback: OnRequestFailed): Future[Subscription] {.async.} = let subscription = RequestFailedSubscription( market: market, - requestId: requestId, + requestId: some requestId, callback: callback ) market.subscriptions.onRequestFailed.add(subscription) return subscription +method subscribeProofSubmission*(mock: MockMarket, + callback: OnProofSubmitted): + Future[Subscription] {.async.} = + let subscription = ProofSubmittedSubscription( + market: mock, + callback: callback + ) + mock.subscriptions.onProofSubmitted.add(subscription) + return subscription + +method queryPastStorageRequests*(market: MockMarket, + blocksAgo: int): + Future[seq[PastStorageRequest]] {.async.} = + # MockMarket does not have the concept of blocks, so simply return all + # previous events + return market.requested.map(request => + PastStorageRequest(requestId: request.id, + ask: request.ask, + expiry: request.expiry) + ) + method unsubscribe*(subscription: RequestSubscription) {.async.} = subscription.market.subscriptions.onRequest.keepItIf(it != subscription) @@ -219,8 +440,14 @@ method unsubscribe*(subscription: FulfillmentSubscription) {.async.} = method unsubscribe*(subscription: SlotFilledSubscription) {.async.} = subscription.market.subscriptions.onSlotFilled.keepItIf(it != subscription) +method unsubscribe*(subscription: SlotFreedSubscription) {.async.} = + subscription.market.subscriptions.onSlotFreed.keepItIf(it != subscription) + method unsubscribe*(subscription: RequestCancelledSubscription) {.async.} = subscription.market.subscriptions.onRequestCancelled.keepItIf(it != subscription) method 
unsubscribe*(subscription: RequestFailedSubscription) {.async.} = subscription.market.subscriptions.onRequestFailed.keepItIf(it != subscription) + +method unsubscribe*(subscription: ProofSubmittedSubscription) {.async.} = + subscription.market.subscriptions.onProofSubmitted.keepItIf(it != subscription) diff --git a/tests/codex/helpers/mockproofs.nim b/tests/codex/helpers/mockproofs.nim deleted file mode 100644 index 25bfc6ee..00000000 --- a/tests/codex/helpers/mockproofs.nim +++ /dev/null @@ -1,73 +0,0 @@ -import std/sets -import std/tables -import std/sequtils -import pkg/upraises -import pkg/codex/storageproofs - -type - MockProofs* = ref object of Proofs - periodicity: Periodicity - proofsRequired: HashSet[SlotId] - proofsToBeRequired: HashSet[SlotId] - proofEnds: Table[SlotId, UInt256] - subscriptions: seq[MockSubscription] - MockSubscription* = ref object of Subscription - proofs: MockProofs - callback: OnProofSubmitted - -const DefaultPeriodLength = 10.u256 - -func new*(_: type MockProofs): MockProofs = - MockProofs(periodicity: Periodicity(seconds: DefaultPeriodLength)) - -func setPeriodicity*(mock: MockProofs, periodicity: Periodicity) = - mock.periodicity = periodicity - -method periodicity*(mock: MockProofs): Future[Periodicity] {.async.} = - return mock.periodicity - -proc setProofRequired*(mock: MockProofs, id: SlotId, required: bool) = - if required: - mock.proofsRequired.incl(id) - else: - mock.proofsRequired.excl(id) - -method isProofRequired*(mock: MockProofs, - id: SlotId): Future[bool] {.async.} = - return mock.proofsRequired.contains(id) - -proc setProofToBeRequired*(mock: MockProofs, id: SlotId, required: bool) = - if required: - mock.proofsToBeRequired.incl(id) - else: - mock.proofsToBeRequired.excl(id) - -method willProofBeRequired*(mock: MockProofs, - id: SlotId): Future[bool] {.async.} = - return mock.proofsToBeRequired.contains(id) - -proc setProofEnd*(mock: MockProofs, id: SlotId, proofEnd: UInt256) = - mock.proofEnds[id] = proofEnd - 
-method getProofEnd*(mock: MockProofs, - id: SlotId): Future[UInt256] {.async.} = - if mock.proofEnds.hasKey(id): - return mock.proofEnds[id] - else: - return UInt256.high - -method submitProof*(mock: MockProofs, - id: SlotId, - proof: seq[byte]) {.async.} = - for subscription in mock.subscriptions: - subscription.callback(id, proof) - -method subscribeProofSubmission*(mock: MockProofs, - callback: OnProofSubmitted): - Future[Subscription] {.async.} = - let subscription = MockSubscription(proofs: mock, callback: callback) - mock.subscriptions.add(subscription) - return subscription - -method unsubscribe*(subscription: MockSubscription) {.async, upraises:[].} = - subscription.proofs.subscriptions.keepItIf(it != subscription) diff --git a/tests/codex/helpers/mockrepostore.nim b/tests/codex/helpers/mockrepostore.nim new file mode 100644 index 00000000..86f881e0 --- /dev/null +++ b/tests/codex/helpers/mockrepostore.nim @@ -0,0 +1,52 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +import std/sequtils +import std/sugar +import pkg/chronos +import pkg/libp2p +import pkg/questionable +import pkg/questionable/results + +import pkg/codex/stores/repostore +import pkg/codex/utils/asynciter + +type + MockRepoStore* = ref object of RepoStore + delBlockCids*: seq[Cid] + getBeMaxNumber*: int + getBeOffset*: int + + testBlockExpirations*: seq[BlockExpiration] + getBlockExpirationsThrows*: bool + +method delBlock*(self: MockRepoStore, cid: Cid): Future[?!void] {.async.} = + self.delBlockCids.add(cid) + self.testBlockExpirations = self.testBlockExpirations.filterIt(it.cid != cid) + return success() + +method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!AsyncIter[BlockExpiration]] {.async.} = + if self.getBlockExpirationsThrows: + raise new CatchableError + + self.getBeMaxNumber = maxNumber + self.getBeOffset = offset + + let + testBlockExpirationsCpy = @(self.testBlockExpirations) + limit = min(offset + maxNumber, len(testBlockExpirationsCpy)) + + let + iter1 = AsyncIter[int].new(offset.. 
0): + await stream.pushData(chunk) + finally: + await stream.pushEof() + await stream.close() + +template setupAndTearDown*() {.dirty.} = + var + file: File + chunker: Chunker + switch: Switch + wallet: WalletRef + network: BlockExcNetwork + clock: Clock + localStore: RepoStore + localStoreRepoDs: DataStore + localStoreMetaDs: DataStore + engine: BlockExcEngine + store: NetworkStore + node: CodexNodeRef + blockDiscovery: Discovery + peerStore: PeerCtxStore + pendingBlocks: PendingBlocksManager + discovery: DiscoveryEngine + taskpool: Taskpool + + let + path = currentSourcePath().parentDir + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + + setup: + file = open(path /../ "" /../ "fixtures" / "test.jpg") + chunker = FileChunker.new(file = file, chunkSize = DefaultBlockSize) + switch = newStandardSwitch() + wallet = WalletRef.new(EthPrivateKey.random()) + network = BlockExcNetwork.new(switch) + + clock = SystemClock.new() + localStoreMetaDs = metaTmp.newDb() + localStoreRepoDs = repoTmp.newDb() + localStore = RepoStore.new(localStoreRepoDs, localStoreMetaDs, clock = clock) + await localStore.start() + + blockDiscovery = Discovery.new( + switch.peerInfo.privateKey, + announceAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0") + .expect("Should return multiaddress")]) + peerStore = PeerCtxStore.new() + pendingBlocks = PendingBlocksManager.new() + discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) + engine = BlockExcEngine.new(localStore, wallet, network, discovery, peerStore, pendingBlocks) + store = NetworkStore.new(engine, localStore) + taskpool = Taskpool.new(num_threads = countProcessors()) + node = CodexNodeRef.new( + switch = switch, + networkStore = store, + engine = engine, + prover = Prover.none, + discovery = blockDiscovery, + taskpool = taskpool) + + await node.start() + + teardown: + close(file) + await node.stop() + await metaTmp.destroyDb() + await repoTmp.destroyDb() diff --git 
a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim new file mode 100644 index 00000000..49557f2c --- /dev/null +++ b/tests/codex/node/testcontracts.nim @@ -0,0 +1,149 @@ +import std/os +import std/options +import std/math +import std/times +import std/sequtils +import std/importutils +import std/cpuinfo + +import pkg/chronos +import pkg/stew/byteutils +import pkg/datastore +import pkg/datastore/typedds +import pkg/questionable +import pkg/questionable/results +import pkg/stint +import pkg/poseidon2 +import pkg/poseidon2/io +import pkg/taskpools + +import pkg/nitro +import pkg/codexdht/discv5/protocol as discv5 + +import pkg/codex/logutils +import pkg/codex/stores +import pkg/codex/clock +import pkg/codex/contracts +import pkg/codex/systemclock +import pkg/codex/blockexchange +import pkg/codex/chunker +import pkg/codex/slots +import pkg/codex/manifest +import pkg/codex/discovery +import pkg/codex/erasure +import pkg/codex/merkletree +import pkg/codex/blocktype as bt +import pkg/codex/stores/repostore/coders +import pkg/codex/utils/asynciter +import pkg/codex/indexingstrategy + +import pkg/codex/node {.all.} + +import ../../asynctest +import ../../examples +import ../helpers +import ../helpers/mockmarket +import ../helpers/mockclock + +import ./helpers + +privateAccess(CodexNodeRef) # enable access to private fields + +asyncchecksuite "Test Node - Host contracts": + setupAndTearDown() + + var + sales: Sales + purchasing: Purchasing + manifest: Manifest + manifestCidStr: string + manifestCid: Cid + market: MockMarket + builder: Poseidon2Builder + verifiable: Manifest + verifiableBlock: bt.Block + protected: Manifest + + setup: + # Setup Host Contracts and dependencies + market = MockMarket.new() + sales = Sales.new(market, clock, localStore) + + node.contracts = ( + none ClientInteractions, + some HostInteractions.new(clock, sales), + none ValidatorInteractions) + + await node.start() + + # Populate manifest in local store + manifest = await 
storeDataGetManifest(localStore, chunker) + let + manifestBlock = bt.Block.new( + manifest.encode().tryGet(), + codec = ManifestCodec).tryGet() + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) + + manifestCid = manifestBlock.cid + manifestCidStr = $(manifestCid) + + (await localStore.putBlock(manifestBlock)).tryGet() + + protected = (await erasure.encode(manifest, 3, 2)).tryGet() + builder = Poseidon2Builder.new(localStore, protected).tryGet() + verifiable = (await builder.buildManifest()).tryGet() + verifiableBlock = bt.Block.new( + verifiable.encode().tryGet(), + codec = ManifestCodec).tryGet() + + (await localStore.putBlock(verifiableBlock)).tryGet() + + test "onExpiryUpdate callback is set": + check sales.onExpiryUpdate.isSome + + test "onExpiryUpdate callback": + let + # The blocks have set default TTL, so in order to update it we have to have larger TTL + expectedExpiry: SecondsSince1970 = clock.now + DefaultBlockTtl.seconds + 11123 + expiryUpdateCallback = !sales.onExpiryUpdate + + (await expiryUpdateCallback(manifestCidStr, expectedExpiry)).tryGet() + + for index in 0.. 
0 and blocks.len <= batchSize + return success() + )).tryGet() + + test "Store and retrieve Data Stream": + let + stream = BufferStream.new() + storeFut = node.store(stream) + oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes # Let's check that node.store can correctly rechunk these odd chunks + oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) # TODO: doesn't work with pad=tue + + var + original: seq[byte] + + try: + while ( + let chunk = await oddChunker.getBytes(); + chunk.len > 0): + original &= chunk + await stream.pushData(chunk) + finally: + await stream.pushEof() + await stream.close() + + let + manifestCid = (await storeFut).tryGet() + manifestBlock = (await localStore.getBlock(manifestCid)).tryGet() + localManifest = Manifest.decode(manifestBlock).tryGet() + data = await (await node.retrieve(manifestCid)).drain() + + check: + data.len == localManifest.datasetSize.int + data.len == original.len + sha256.digest(data) == sha256.digest(original) + + test "Retrieve One Block": + let + testString = "Block 1" + blk = bt.Block.new(testString.toBytes).tryGet() + + (await localStore.putBlock(blk)).tryGet() + let stream = (await node.retrieve(blk.cid)).tryGet() + defer: await stream.close() + + var data = newSeq[byte](testString.len) + await stream.readExactly(addr data[0], data.len) + check string.fromBytes(data) == testString + + test "Setup purchase request": + let + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) + manifest = await storeDataGetManifest(localStore, chunker) + manifestBlock = bt.Block.new( + manifest.encode().tryGet(), + codec = ManifestCodec).tryGet() + protected = (await erasure.encode(manifest, 3, 2)).tryGet() + builder = Poseidon2Builder.new(localStore, protected).tryGet() + verifiable = (await builder.buildManifest()).tryGet() + verifiableBlock = bt.Block.new( + verifiable.encode().tryGet(), + codec = ManifestCodec).tryGet() + + (await 
localStore.putBlock(manifestBlock)).tryGet() + + let + request = (await node.setupRequest( + cid = manifestBlock.cid, + nodes = 5, + tolerance = 2, + duration = 100.u256, + reward = 2.u256, + proofProbability = 3.u256, + expiry = 200.u256, + collateral = 200.u256)).tryGet + + check: + (await verifiableBlock.cid in localStore) == true + request.content.cid == $verifiableBlock.cid + request.content.merkleRoot == builder.verifyRoot.get.toBytes diff --git a/tests/codex/sales/helpers/periods.nim b/tests/codex/sales/helpers/periods.nim new file mode 100644 index 00000000..ba1793c2 --- /dev/null +++ b/tests/codex/sales/helpers/periods.nim @@ -0,0 +1,8 @@ +import pkg/codex/market +import ../../helpers/mockclock + +proc advanceToNextPeriod*(clock: MockClock, market: Market) {.async.} = + let periodicity = await market.periodicity() + let period = periodicity.periodOf(clock.now().u256) + let periodEnd = periodicity.periodEnd(period) + clock.set((periodEnd + 1).truncate(int)) diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim new file mode 100644 index 00000000..e252cd9c --- /dev/null +++ b/tests/codex/sales/states/testcancelled.nim @@ -0,0 +1,45 @@ +import pkg/questionable +import pkg/chronos +import pkg/codex/contracts/requests +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/market + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock + +asyncchecksuite "sales state 'cancelled'": + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let market = MockMarket.new() + let clock = MockClock.new() + + var state: SaleCancelled + var agent: SalesAgent + var returnBytesWas = false + var reprocessSlotWas = false + + setup: + let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = + returnBytesWas = returnBytes + 
reprocessSlotWas = reprocessSlot + + let context = SalesContext( + market: market, + clock: clock + ) + agent = newSalesAgent(context, + request.id, + slotIndex, + request.some) + agent.onCleanUp = onCleanUp + state = SaleCancelled.new() + + test "calls onCleanUp with returnBytes = false and reprocessSlot = true": + discard await state.run(agent) + check eventually returnBytesWas == true + check eventually reprocessSlotWas == false diff --git a/tests/codex/sales/states/testdownloading.nim b/tests/codex/sales/states/testdownloading.nim new file mode 100644 index 00000000..fc81b158 --- /dev/null +++ b/tests/codex/sales/states/testdownloading.nim @@ -0,0 +1,30 @@ +import std/unittest +import pkg/questionable +import pkg/codex/contracts/requests +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/downloading +import pkg/codex/sales/states/errored +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/filled +import ../../examples +import ../../helpers + +checksuite "sales state 'downloading'": + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + var state: SaleDownloading + + setup: + state = SaleDownloading.new() + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "switches to filled state when slot is filled": + let next = state.onSlotFilled(request.id, slotIndex) + check !next of SaleFilled diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim new file mode 100644 index 00000000..dc525894 --- /dev/null +++ b/tests/codex/sales/states/testerrored.nim @@ -0,0 +1,49 @@ +import pkg/questionable +import pkg/chronos +import pkg/codex/contracts/requests +import pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import 
pkg/codex/sales/salescontext +import pkg/codex/market + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock + +asyncchecksuite "sales state 'errored'": + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let market = MockMarket.new() + let clock = MockClock.new() + + var state: SaleErrored + var agent: SalesAgent + var returnBytesWas = false + var reprocessSlotWas = false + + setup: + let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot + + let context = SalesContext( + market: market, + clock: clock + ) + agent = newSalesAgent(context, + request.id, + slotIndex, + request.some) + agent.onCleanUp = onCleanUp + state = SaleErrored(error: newException(ValueError, "oh no!")) + + test "calls onCleanUp with returnBytes = false and reprocessSlot = true": + state = SaleErrored( + error: newException(ValueError, "oh no!"), + reprocessSlot: true + ) + discard await state.run(agent) + check eventually returnBytesWas == true + check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim new file mode 100644 index 00000000..e8a16e10 --- /dev/null +++ b/tests/codex/sales/states/testfilled.nim @@ -0,0 +1,69 @@ +import pkg/questionable/results + +import pkg/codex/clock +import pkg/codex/contracts/requests +import pkg/codex/sales +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/sales/states/filled +import pkg/codex/sales/states/errored +import pkg/codex/sales/states/proving +import pkg/codex/sales/states/finished + +import ../../../asynctest +import ../../helpers/mockmarket +import ../../examples +import ../../helpers + +checksuite "sales state 'filled'": + + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + + var 
market: MockMarket + var slot: MockSlot + var agent: SalesAgent + var state: SaleFilled + var onExpiryUpdatePassedExpiry: SecondsSince1970 + + setup: + market = MockMarket.new() + slot = MockSlot(requestId: request.id, + host: Address.example, + slotIndex: slotIndex, + proof: Groth16Proof.default) + + market.requestEnds[request.id] = 321 + onExpiryUpdatePassedExpiry = -1 + let onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + onExpiryUpdatePassedExpiry = expiry + return success() + let context = SalesContext(market: market, onExpiryUpdate: some onExpiryUpdate) + + agent = newSalesAgent(context, + request.id, + slotIndex, + some request) + state = SaleFilled.new() + + test "switches to proving state when slot is filled by me": + slot.host = await market.getSigner() + market.filled = @[slot] + let next = await state.run(agent) + check !next of SaleProving + + test "calls onExpiryUpdate with request end": + slot.host = await market.getSigner() + market.filled = @[slot] + + let expectedExpiry = 123 + market.requestEnds[request.id] = expectedExpiry + let next = await state.run(agent) + check !next of SaleProving + check onExpiryUpdatePassedExpiry == expectedExpiry + + test "switches to error state when slot is filled by another host": + slot.host = Address.example + market.filled = @[slot] + let next = await state.run(agent) + check !next of SaleErrored diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim new file mode 100644 index 00000000..ef2e2aa4 --- /dev/null +++ b/tests/codex/sales/states/testfilling.nim @@ -0,0 +1,30 @@ +import std/unittest +import pkg/questionable +import pkg/codex/contracts/requests +import pkg/codex/sales/states/filling +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/filled +import ../../examples +import ../../helpers + +checksuite "sales state 'filling'": + + let request = 
StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + var state: SaleFilling + + setup: + state = SaleFilling.new() + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "switches to filled state when slot is filled": + let next = state.onSlotFilled(request.id, slotIndex) + check !next of SaleFilled diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim new file mode 100644 index 00000000..a5f6690f --- /dev/null +++ b/tests/codex/sales/states/testfinished.nim @@ -0,0 +1,24 @@ +import std/unittest +import pkg/questionable +import pkg/codex/contracts/requests +import pkg/codex/sales/states/finished +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import ../../examples +import ../../helpers + +checksuite "sales state 'finished'": + + let request = StorageRequest.example + var state: SaleFinished + + setup: + state = SaleFinished.new() + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed diff --git a/tests/codex/sales/states/testignored.nim b/tests/codex/sales/states/testignored.nim new file mode 100644 index 00000000..680dca8d --- /dev/null +++ b/tests/codex/sales/states/testignored.nim @@ -0,0 +1,45 @@ +import pkg/questionable +import pkg/chronos +import pkg/codex/contracts/requests +import pkg/codex/sales/states/ignored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/market + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock + +asyncchecksuite 
"sales state 'ignored'": + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let market = MockMarket.new() + let clock = MockClock.new() + + var state: SaleIgnored + var agent: SalesAgent + var returnBytesWas = false + var reprocessSlotWas = false + + setup: + let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot + + let context = SalesContext( + market: market, + clock: clock + ) + agent = newSalesAgent(context, + request.id, + slotIndex, + request.some) + agent.onCleanUp = onCleanUp + state = SaleIgnored.new() + + test "calls onCleanUp with returnBytes = false and reprocessSlot = true": + discard await state.run(agent) + check eventually returnBytesWas == false + check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testinitialproving.nim b/tests/codex/sales/states/testinitialproving.nim new file mode 100644 index 00000000..69355567 --- /dev/null +++ b/tests/codex/sales/states/testinitialproving.nim @@ -0,0 +1,113 @@ +import pkg/questionable +import pkg/chronos +import pkg/codex/contracts/requests +import pkg/codex/sales/states/initialproving +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/filling +import pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/market + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock +import ../helpers/periods + +asyncchecksuite "sales state 'initialproving'": + let proof = Groth16Proof.example + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let market = MockMarket.new() + let clock = MockClock.new() + + var state: SaleInitialProving + var agent: SalesAgent + var receivedChallenge: ProofChallenge + + setup: + let onProve = 
proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + receivedChallenge = challenge + return success(proof) + let context = SalesContext( + onProve: onProve.some, + market: market, + clock: clock + ) + agent = newSalesAgent(context, + request.id, + slotIndex, + request.some) + state = SaleInitialProving.new() + + proc allowProofToStart {.async.} = + # wait until we're in initialproving state + await sleepAsync(10.millis) + # it won't start proving until the next period + await clock.advanceToNextPeriod(market) + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "waits for the beginning of the period to get the challenge": + let future = state.run(agent) + await sleepAsync(10.millis) + check not future.finished + await allowProofToStart() + discard await future + + test "waits another period when the proof pointer is about to wrap around": + market.proofPointer = 250 + let future = state.run(agent) + await allowProofToStart() + await sleepAsync(10.millis) + check not future.finished + market.proofPointer = 100 + await allowProofToStart() + discard await future + + test "onProve callback provides proof challenge": + market.proofChallenge = ProofChallenge.example + + let future = state.run(agent) + await allowProofToStart() + + discard await future + + check receivedChallenge == market.proofChallenge + + test "switches to filling state when initial proving is complete": + let future = state.run(agent) + await allowProofToStart() + let next = await future + + check !next of SaleFilling + check SaleFilling(!next).proof == proof + + test "switches to errored state when onProve callback fails": + let onProveFailed: OnProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + return failure("oh no!") + + let 
proofFailedContext = SalesContext( + onProve: onProveFailed.some, + market: market, + clock: clock + ) + agent = newSalesAgent(proofFailedContext, + request.id, + slotIndex, + request.some) + + let future = state.run(agent) + await allowProofToStart() + let next = await future + + check !next of SaleErrored diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim new file mode 100644 index 00000000..c095d99e --- /dev/null +++ b/tests/codex/sales/states/testpreparing.nim @@ -0,0 +1,100 @@ +import pkg/chronos +import pkg/questionable +import pkg/datastore +import pkg/stew/byteutils +import pkg/codex/contracts/requests +import pkg/codex/sales/states/preparing +import pkg/codex/sales/states/downloading +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/filled +import pkg/codex/sales/states/ignored +import pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/sales/reservations +import pkg/codex/stores/repostore +import ../../../asynctest +import ../../helpers +import ../../examples +import ../../helpers/mockmarket +import ../../helpers/mockreservations +import ../../helpers/mockclock + +asyncchecksuite "sales state 'preparing'": + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let market = MockMarket.new() + let clock = MockClock.new() + var agent: SalesAgent + var state: SalePreparing + var repo: RepoStore + var availability: Availability + var context: SalesContext + var reservations: MockReservations + + setup: + availability = Availability( + totalSize: request.ask.slotSize + 100.u256, + freeSize: request.ask.slotSize + 100.u256, + duration: request.ask.duration + 60.u256, + minPrice: request.ask.pricePerSlot - 10.u256, + maxCollateral: request.ask.collateral + 400.u256 + ) + let repoDs = SQLiteDatastore.new(Memory).tryGet() + let metaDs = 
SQLiteDatastore.new(Memory).tryGet() + repo = RepoStore.new(repoDs, metaDs) + await repo.start() + + state = SalePreparing.new() + context = SalesContext( + market: market, + clock: clock + ) + + reservations = MockReservations.new(repo) + context.reservations = reservations + agent = newSalesAgent(context, + request.id, + slotIndex, + request.some) + + teardown: + await repo.stop() + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "switches to filled state when slot is filled": + let next = state.onSlotFilled(request.id, slotIndex) + check !next of SaleFilled + + proc createAvailability() {.async.} = + let a = await reservations.createAvailability( + availability.totalSize, + availability.duration, + availability.minPrice, + availability.maxCollateral + ) + availability = a.get + + test "run switches to ignored when no availability": + let next = await state.run(agent) + check !next of SaleIgnored + + test "run switches to downloading when reserved": + await createAvailability() + let next = await state.run(agent) + check !next of SaleDownloading + + test "run switches to ignored when reserve fails with BytesOutOfBounds": + await createAvailability() + reservations.setCreateReservationThrowBytesOutOfBoundsError(true) + + let next = await state.run(agent) + check !next of SaleIgnored diff --git a/tests/codex/sales/states/testproving.nim b/tests/codex/sales/states/testproving.nim new file mode 100644 index 00000000..5f18746b --- /dev/null +++ b/tests/codex/sales/states/testproving.nim @@ -0,0 +1,107 @@ +import pkg/chronos +import pkg/questionable +import pkg/codex/contracts/requests +import pkg/codex/sales/states/proving +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/payout +import 
pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock + +asyncchecksuite "sales state 'proving'": + + let slot = Slot.example + let request = slot.request + let proof = Groth16Proof.example + + var market: MockMarket + var clock: MockClock + var agent: SalesAgent + var state: SaleProving + var receivedChallenge: ProofChallenge + + setup: + clock = MockClock.new() + market = MockMarket.new() + let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + receivedChallenge = challenge + return success(proof) + let context = SalesContext(market: market, clock: clock, onProve: onProve.some) + agent = newSalesAgent(context, + request.id, + slot.slotIndex, + request.some) + state = SaleProving.new() + + proc advanceToNextPeriod(market: Market) {.async.} = + let periodicity = await market.periodicity() + let current = periodicity.periodOf(clock.now().u256) + let periodEnd = periodicity.periodEnd(current) + clock.set(periodEnd.truncate(int64) + 1) + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "submits proofs": + var receivedIds: seq[SlotId] + + proc onProofSubmission(id: SlotId) = + receivedIds.add(id) + + let subscription = await market.subscribeProofSubmission(onProofSubmission) + market.slotState[slot.id] = SlotState.Filled + + let future = state.run(agent) + + market.setProofRequired(slot.id, true) + await market.advanceToNextPeriod() + + check eventually receivedIds.contains(slot.id) + + await future.cancelAndWait() + await subscription.unsubscribe() + + test "switches to payout state when request is finished": + 
market.slotState[slot.id] = SlotState.Filled + + let future = state.run(agent) + + market.slotState[slot.id] = SlotState.Finished + await market.advanceToNextPeriod() + + check eventually future.finished + check !(future.read()) of SalePayout + + test "switches to error state when slot is no longer filled": + market.slotState[slot.id] = SlotState.Filled + + let future = state.run(agent) + + market.slotState[slot.id] = SlotState.Free + await market.advanceToNextPeriod() + + check eventually future.finished + check !(future.read()) of SaleErrored + + test "onProve callback provides proof challenge": + market.proofChallenge = ProofChallenge.example + market.slotState[slot.id] = SlotState.Filled + market.setProofRequired(slot.id, true) + + let future = state.run(agent) + + check eventually receivedChallenge == market.proofChallenge + + await future.cancelAndWait() diff --git a/tests/codex/sales/states/testsimulatedproving.nim b/tests/codex/sales/states/testsimulatedproving.nim new file mode 100644 index 00000000..f4ca3ba9 --- /dev/null +++ b/tests/codex/sales/states/testsimulatedproving.nim @@ -0,0 +1,98 @@ +import pkg/chronos +import pkg/questionable +import pkg/codex/contracts/requests +import pkg/codex/sales/states/provingsimulated +import pkg/codex/sales/states/proving +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/payout +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock + +asyncchecksuite "sales state 'simulated-proving'": + + let slot = Slot.example + let request = slot.request + let proof = Groth16Proof.example + let failEveryNProofs = 3 + let totalProofs = 6 + + var market: MockMarket + var clock: MockClock + var agent: SalesAgent + var state: SaleProvingSimulated + + var proofSubmitted: Future[void] = newFuture[void]("proofSubmitted") + var 
subscription: Subscription + + setup: + clock = MockClock.new() + + proc onProofSubmission(id: SlotId) = + proofSubmitted.complete() + proofSubmitted = newFuture[void]("proofSubmitted") + + market = MockMarket.new() + market.slotState[slot.id] = SlotState.Filled + market.setProofRequired(slot.id, true) + subscription = await market.subscribeProofSubmission(onProofSubmission) + + let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + return success(proof) + let context = SalesContext(market: market, clock: clock, onProve: onProve.some) + agent = newSalesAgent(context, + request.id, + slot.slotIndex, + request.some) + state = SaleProvingSimulated.new() + state.failEveryNProofs = failEveryNProofs + + teardown: + await subscription.unsubscribe() + + proc advanceToNextPeriod(market: Market) {.async.} = + let periodicity = await market.periodicity() + let current = periodicity.periodOf(clock.now().u256) + let periodEnd = periodicity.periodEnd(current) + clock.set(periodEnd.truncate(int64) + 1) + + proc waitForProvingRounds(market: Market, rounds: int) {.async.} = + var rnds = rounds - 1 # proof round runs prior to advancing + while rnds > 0: + await market.advanceToNextPeriod() + await proofSubmitted + rnds -= 1 + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "submits invalid proof every 3 proofs": + let future = state.run(agent) + let invalid = Groth16Proof.default + + await market.waitForProvingRounds(totalProofs) + check market.submitted == @[proof, proof, invalid, proof, proof, invalid] + + await future.cancelAndWait() + + test "switches to payout state when request is finished": + market.slotState[slot.id] = SlotState.Filled + + let future = state.run(agent) + + market.slotState[slot.id] = SlotState.Finished + 
await market.advanceToNextPeriod() + + check eventually future.finished + check !(future.read()) of SalePayout diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim new file mode 100644 index 00000000..e02b3c90 --- /dev/null +++ b/tests/codex/sales/states/testunknown.nim @@ -0,0 +1,63 @@ +import pkg/codex/contracts/requests +import pkg/codex/sales +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/sales/states/unknown +import pkg/codex/sales/states/errored +import pkg/codex/sales/states/filled +import pkg/codex/sales/states/finished +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/payout + +import ../../../asynctest +import ../../helpers/mockmarket +import ../../examples +import ../../helpers + +checksuite "sales state 'unknown'": + + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let slotId = slotId(request.id, slotIndex) + + var market: MockMarket + var agent: SalesAgent + var state: SaleUnknown + + setup: + market = MockMarket.new() + let context = SalesContext(market: market) + agent = newSalesAgent(context, + request.id, + slotIndex, + StorageRequest.none) + state = SaleUnknown.new() + + test "switches to error state when on chain state cannot be fetched": + let next = await state.run(agent) + check !next of SaleErrored + + test "switches to error state when on chain state is 'free'": + market.slotState[slotId] = SlotState.Free + let next = await state.run(agent) + check !next of SaleErrored + + test "switches to filled state when on chain state is 'filled'": + market.slotState[slotId] = SlotState.Filled + let next = await state.run(agent) + check !next of SaleFilled + + test "switches to payout state when on chain state is 'finished'": + market.slotState[slotId] = SlotState.Finished + let next = await state.run(agent) + check !next of SalePayout + + test "switches to finished state when on chain state is 'paid'": + 
market.slotState[slotId] = SlotState.Paid + let next = await state.run(agent) + check !next of SaleFinished + + test "switches to failed state when on chain state is 'failed'": + market.slotState[slotId] = SlotState.Failed + let next = await state.run(agent) + check !next of SaleFailed diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim new file mode 100644 index 00000000..56508a22 --- /dev/null +++ b/tests/codex/sales/testreservations.nim @@ -0,0 +1,390 @@ +import std/random +import std/sequtils + +import pkg/questionable +import pkg/questionable/results +import pkg/chronos +import pkg/datastore + +import pkg/codex/stores +import pkg/codex/errors +import pkg/codex/sales +import pkg/codex/utils/json + +import ../../asynctest +import ../examples +import ../helpers + +const CONCURRENCY_TESTS_COUNT = 1000 + +asyncchecksuite "Reservations module": + var + repo: RepoStore + repoDs: Datastore + metaDs: Datastore + reservations: Reservations + let + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + + setup: + randomize(1.int64) # create reproducible results + repoDs = repoTmp.newDb() + metaDs = metaTmp.newDb() + repo = RepoStore.new(repoDs, metaDs) + reservations = Reservations.new(repo) + + teardown: + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + proc createAvailability(): Availability = + let example = Availability.example + let totalSize = rand(100000..200000) + let availability = waitFor reservations.createAvailability( + totalSize.u256, + example.duration, + example.minPrice, + example.maxCollateral + ) + return availability.get + + proc createReservation(availability: Availability): Reservation = + let size = rand(1.. 
orig + check (updated.freeSize - orig) == 200.u256 + check (repo.quotaReservedBytes - origQuota) == 200.NBytes + + test "update releases quota when lowering size": + let + availability = createAvailability() + origQuota = repo.quotaReservedBytes + availability.totalSize = availability.totalSize - 100 + + check isOk await reservations.update(availability) + check (origQuota - repo.quotaReservedBytes) == 100.NBytes + + test "update reserves quota when growing size": + let + availability = createAvailability() + origQuota = repo.quotaReservedBytes + availability.totalSize = availability.totalSize + 100 + + check isOk await reservations.update(availability) + check (repo.quotaReservedBytes - origQuota) == 100.NBytes + + test "reservation can be partially released": + let availability = createAvailability() + let reservation = createReservation(availability) + check isOk await reservations.release( + reservation.id, + reservation.availabilityId, + 1 + ) + let key = reservation.key.get + let updated = !(await reservations.get(key, Reservation)) + check updated.size == reservation.size - 1 + + test "cannot release more bytes than size of reservation": + let availability = createAvailability() + let reservation = createReservation(availability) + let updated = await reservations.release( + reservation.id, + reservation.availabilityId, + (reservation.size + 1).truncate(uint) + ) + check updated.isErr + check updated.error of BytesOutOfBoundsError + + test "cannot release bytes from non-existant reservation": + let availability = createAvailability() + let reservation = createReservation(availability) + let updated = await reservations.release( + ReservationId.example, + availability.id, + 1 + ) + check updated.isErr + check updated.error of NotExistsError + + test "onAvailabilityAdded called when availability is created": + var added: Availability + reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + added = a + + let availability = createAvailability() + 
+ check added == availability + + test "onAvailabilityAdded called when availability size is increased": + var availability = createAvailability() + var added: Availability + reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + added = a + availability.freeSize += 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "onAvailabilityAdded is not called when availability size is decreased": + var availability = createAvailability() + var called = false + reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + called = true + availability.freeSize -= 1.u256 + discard await reservations.update(availability) + + check not called + + test "availabilities can be found": + let availability = createAvailability() + + let found = await reservations.findAvailability( + availability.freeSize, + availability.duration, + availability.minPrice, + availability.maxCollateral) + + check found.isSome + check found.get == availability + + test "non-matching availabilities are not found": + let availability = createAvailability() + + let found = await reservations.findAvailability( + availability.freeSize + 1, + availability.duration, + availability.minPrice, + availability.maxCollateral) + + check found.isNone + + test "non-existant availability cannot be found": + let availability = Availability.example + let found = (await reservations.findAvailability( + availability.freeSize, + availability.duration, + availability.minPrice, + availability.maxCollateral + )) + check found.isNone + + test "non-existant availability cannot be retrieved": + let key = AvailabilityId.example.key.get + let got = await reservations.get(key, Availability) + check got.error of NotExistsError + + test "can get available bytes in repo": + check reservations.available == DefaultQuotaBytes.uint + + test "reports quota available to be reserved": + check reservations.hasAvailable(DefaultQuotaBytes.uint - 1) + + test "reports quota not 
available to be reserved": + check not reservations.hasAvailable(DefaultQuotaBytes.uint + 1) + + test "fails to create availability with size that is larger than available quota": + let created = await reservations.createAvailability( + (DefaultQuotaBytes.uint + 1).u256, + UInt256.example, + UInt256.example, + UInt256.example + ) + check created.isErr + check created.error of ReserveFailedError + check created.error.parent of QuotaNotEnoughError diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim new file mode 100644 index 00000000..0fdf3bf9 --- /dev/null +++ b/tests/codex/sales/testsales.nim @@ -0,0 +1,596 @@ +import std/sequtils +import std/sugar +import std/times +import pkg/chronos +import pkg/datastore/typedds +import pkg/questionable +import pkg/questionable/results +import pkg/codex/sales +import pkg/codex/sales/salesdata +import pkg/codex/sales/salescontext +import pkg/codex/sales/reservations +import pkg/codex/sales/slotqueue +import pkg/codex/stores/repostore +import pkg/codex/blocktype as bt +import pkg/codex/node +import ../../asynctest +import ../helpers +import ../helpers/mockmarket +import ../helpers/mockclock +import ../helpers/always +import ../examples +import ./helpers/periods + +asyncchecksuite "Sales - start": + let + proof = Groth16Proof.example + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + + var request: StorageRequest + var sales: Sales + var market: MockMarket + var clock: MockClock + var reservations: Reservations + var repo: RepoStore + var queue: SlotQueue + var itemsProcessed: seq[SlotQueueItem] + + setup: + request = StorageRequest( + ask: StorageAsk( + slots: 4, + slotSize: 100.u256, + duration: 60.u256, + reward: 10.u256, + collateral: 200.u256, + ), + content: StorageContent( + cid: "some cid" + ), + expiry: (getTime() + initDuration(hours=1)).toUnix.u256 + ) + + market = MockMarket.new() + clock = MockClock.new() + let repoDs = repoTmp.newDb() + let metaDs = metaTmp.newDb() + repo = 
RepoStore.new(repoDs, metaDs) + await repo.start() + sales = Sales.new(market, clock, repo) + reservations = sales.context.reservations + sales.onStore = proc(request: StorageRequest, + slot: UInt256, + onBatch: BatchProc): Future[?!void] {.async.} = + return success() + + sales.onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + return success() + + queue = sales.context.slotQueue + sales.onProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + return success(proof) + itemsProcessed = @[] + request.expiry = (clock.now() + 42).u256 + + teardown: + await sales.stop() + await repo.stop() + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + let address = await market.getSigner() + let slot = MockSlot(requestId: request.id, + slotIndex: slotIdx, + proof: proof, + host: address) + market.filled.add slot + market.slotState[slotId(request.id, slotIdx)] = SlotState.Filled + + test "load slots when Sales module starts": + let me = await market.getSigner() + + request.ask.slots = 2 + market.requested = @[request] + market.requestState[request.id] = RequestState.New + + let slot0 = MockSlot(requestId: request.id, + slotIndex: 0.u256, + proof: proof, + host: me) + await fillSlot(slot0.slotIndex) + + let slot1 = MockSlot(requestId: request.id, + slotIndex: 1.u256, + proof: proof, + host: me) + await fillSlot(slot1.slotIndex) + + market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.requested = @[request] + market.activeRequests[me] = @[request.id] + + await sales.start() + + check eventually sales.agents.len == 2 + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + +asyncchecksuite "Sales": + let + proof = Groth16Proof.example + repoTmp = 
TempLevelDb.new() + metaTmp = TempLevelDb.new() + + var availability: Availability + var request: StorageRequest + var sales: Sales + var market: MockMarket + var clock: MockClock + var reservations: Reservations + var repo: RepoStore + var queue: SlotQueue + var itemsProcessed: seq[SlotQueueItem] + + setup: + availability = Availability( + totalSize: 100.u256, + freeSize: 100.u256, + duration: 60.u256, + minPrice: 600.u256, + maxCollateral: 400.u256 + ) + request = StorageRequest( + ask: StorageAsk( + slots: 4, + slotSize: 100.u256, + duration: 60.u256, + reward: 10.u256, + collateral: 200.u256, + ), + content: StorageContent( + cid: "some cid" + ), + expiry: (getTime() + initDuration(hours=1)).toUnix.u256 + ) + + market = MockMarket.new() + + let me = await market.getSigner() + market.activeSlots[me] = @[] + market.requestEnds[request.id] = request.expiry.toSecondsSince1970 + + clock = MockClock.new() + let repoDs = repoTmp.newDb() + let metaDs = metaTmp.newDb() + repo = RepoStore.new(repoDs, metaDs) + await repo.start() + sales = Sales.new(market, clock, repo) + reservations = sales.context.reservations + sales.onStore = proc(request: StorageRequest, + slot: UInt256, + onBatch: BatchProc): Future[?!void] {.async.} = + return success() + + sales.onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + return success() + + queue = sales.context.slotQueue + sales.onProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + return success(proof) + await sales.start() + itemsProcessed = @[] + + teardown: + await sales.stop() + await repo.stop() + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + proc allowRequestToStart {.async.} = + # wait until we're in initialproving state + await sleepAsync(10.millis) + # it won't start proving until the next period + await clock.advanceToNextPeriod(market) + + proc getAvailability: Availability = + let key = availability.id.key.get + (waitFor 
reservations.get(key, Availability)).get + + proc createAvailability() = + let a = waitFor reservations.createAvailability( + availability.totalSize, + availability.duration, + availability.minPrice, + availability.maxCollateral + ) + availability = a.get # update id + + proc notProcessed(itemsProcessed: seq[SlotQueueItem], + request: StorageRequest): bool = + let items = SlotQueueItem.init(request) + for i in 0.. 0 + check market.filled[0].requestId == request.id + check market.filled[0].slotIndex < request.ask.slots.u256 + check market.filled[0].proof == proof + check market.filled[0].host == await market.getSigner() + + test "calls onFilled when slot is filled": + var soldRequest = StorageRequest.default + var soldSlotIndex = UInt256.high + sales.onSale = proc(request: StorageRequest, + slotIndex: UInt256) = + soldRequest = request + soldSlotIndex = slotIndex + createAvailability() + await market.requestStorage(request) + await allowRequestToStart() + + check eventually soldRequest == request + check soldSlotIndex < request.ask.slots.u256 + + test "calls onClear when storage becomes available again": + # fail the proof intentionally to trigger `agent.finish(success=false)`, + # which then calls the onClear callback + sales.onProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + raise newException(IOError, "proof failed") + var clearedRequest: StorageRequest + var clearedSlotIndex: UInt256 + sales.onClear = proc(request: StorageRequest, + slotIndex: UInt256) = + clearedRequest = request + clearedSlotIndex = slotIndex + createAvailability() + await market.requestStorage(request) + await allowRequestToStart() + + check eventually clearedRequest == request + check clearedSlotIndex < request.ask.slots.u256 + + test "makes storage available again when other host fills the slot": + let otherHost = Address.example + sales.onStore = proc(request: StorageRequest, + slot: UInt256, + onBatch: BatchProc): Future[?!void] {.async.} = + await 
sleepAsync(chronos.hours(1)) + return success() + createAvailability() + await market.requestStorage(request) + for slotIndex in 0.. agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + + test "deletes inactive reservations on load": + createAvailability() + discard await reservations.createReservation( + availability.id, + 100.u256, + RequestId.example, + UInt256.example) + check (await reservations.all(Reservation)).get.len == 1 + await sales.load() + check (await reservations.all(Reservation)).get.len == 0 + check getAvailability().freeSize == availability.freeSize # was restored diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim new file mode 100644 index 00000000..215f8bb4 --- /dev/null +++ b/tests/codex/sales/testsalesagent.nim @@ -0,0 +1,139 @@ +import std/times +import pkg/chronos +import pkg/codex/sales +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/sales/statemachine +import pkg/codex/sales/states/errorhandling + +import ../../asynctest +import ../helpers/mockmarket +import ../helpers/mockclock +import ../helpers +import ../examples + +var onCancelCalled = false +var onFailedCalled = false +var onSlotFilledCalled = false +var onErrorCalled = false + +type + MockState = ref object of SaleState + MockErrorState = ref object of ErrorHandlingState + +method `$`*(state: MockState): string = "MockState" +method `$`*(state: MockErrorState): string = "MockErrorState" + +method onCancelled*(state: MockState, request: StorageRequest): ?State = + onCancelCalled = true + +method onFailed*(state: MockState, request: StorageRequest): ?State = + onFailedCalled = true + +method onSlotFilled*(state: MockState, requestId: RequestId, + slotIndex: UInt256): ?State = + onSlotFilledCalled = true + +method onError*(state: MockErrorState, err: ref CatchableError): ?State = + 
onErrorCalled = true + +method run*(state: MockErrorState, machine: Machine): Future[?State] {.async.} = + raise newException(ValueError, "failure") + +asyncchecksuite "Sales agent": + let request = StorageRequest.example + var agent: SalesAgent + var context: SalesContext + var slotIndex: UInt256 + var market: MockMarket + var clock: MockClock + + setup: + market = MockMarket.new() + market.requestExpiry[request.id] = getTime().toUnix() + request.expiry.truncate(int64) + clock = MockClock.new() + context = SalesContext(market: market, clock: clock) + slotIndex = 0.u256 + onCancelCalled = false + onFailedCalled = false + onSlotFilledCalled = false + agent = newSalesAgent(context, + request.id, + slotIndex, + some request) + + teardown: + await agent.stop() + + test "can retrieve request": + agent = newSalesAgent(context, + request.id, + slotIndex, + none StorageRequest) + market.requested = @[request] + await agent.retrieveRequest() + check agent.data.request == some request + + test "subscribe assigns cancelled future": + await agent.subscribe() + check not agent.data.cancelled.isNil + + test "unsubscribe deassigns canceleld future": + await agent.subscribe() + await agent.unsubscribe() + check agent.data.cancelled.isNil + + test "subscribe can be called multiple times, without overwriting subscriptions/futures": + await agent.subscribe() + let cancelled = agent.data.cancelled + await agent.subscribe() + check cancelled == agent.data.cancelled + + test "unsubscribe can be called multiple times": + await agent.subscribe() + await agent.unsubscribe() + await agent.unsubscribe() + + test "current state onCancelled called when cancel emitted": + agent.start(MockState.new()) + await agent.subscribe() + market.requestState[request.id] = RequestState.Cancelled + clock.set(market.requestExpiry[request.id] + 1) + check eventually onCancelCalled + + for requestState in {RequestState.New, Started, Finished, Failed}: + test "onCancelled is not called when request state is " & 
$requestState: + agent.start(MockState.new()) + await agent.subscribe() + market.requestState[request.id] = requestState + clock.set(market.requestExpiry[request.id] + 1) + await sleepAsync(100.millis) + check not onCancelCalled + + for requestState in {RequestState.Started, Finished, Failed}: + test "cancelled future is finished when request state is " & $requestState: + agent.start(MockState.new()) + await agent.subscribe() + market.requestState[request.id] = requestState + clock.set(market.requestExpiry[request.id] + 1) + check eventually agent.data.cancelled.finished + + test "cancelled future is finished (cancelled) when onFulfilled called": + agent.start(MockState.new()) + await agent.subscribe() + agent.onFulfilled(request.id) + check eventually agent.data.cancelled.cancelled() + + test "current state onFailed called when onFailed called": + agent.start(MockState.new()) + agent.onFailed(request.id) + check eventually onFailedCalled + + test "current state onSlotFilled called when slot filled emitted": + agent.start(MockState.new()) + agent.onSlotFilled(request.id, slotIndex) + check eventually onSlotFilledCalled + + test "ErrorHandlingState.onError can be overridden at the state level": + agent.start(MockErrorState.new()) + check eventually onErrorCalled diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim new file mode 100644 index 00000000..193751c8 --- /dev/null +++ b/tests/codex/sales/testslotqueue.nim @@ -0,0 +1,586 @@ +import std/sequtils +import pkg/chronos +import pkg/datastore +import pkg/questionable +import pkg/questionable/results + +import pkg/codex/logutils +import pkg/codex/sales/slotqueue + +import ../../asynctest +import ../helpers +import ../helpers/mockmarket +import ../helpers/mockslotqueueitem +import ../examples + +suite "Slot queue start/stop": + + var queue: SlotQueue + + setup: + queue = SlotQueue.new() + + teardown: + await queue.stop() + + test "starts out not running": + check not queue.running + 
+ test "can call start multiple times, and when already running": + asyncSpawn queue.start() + asyncSpawn queue.start() + check queue.running + + test "can call stop when alrady stopped": + await queue.stop() + check not queue.running + + test "can call stop when running": + asyncSpawn queue.start() + await queue.stop() + check not queue.running + + test "can call stop multiple times": + asyncSpawn queue.start() + await queue.stop() + await queue.stop() + check not queue.running + +suite "Slot queue workers": + + var queue: SlotQueue + + proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = + await sleepAsync(1000.millis) + # this is not illustrative of the realistic scenario as the + # `doneProcessing` future would be passed to another context before being + # completed and therefore is not as simple as making the callback async + doneProcessing.complete() + + setup: + let request = StorageRequest.example + queue = SlotQueue.new(maxSize = 5, maxWorkers = 3) + queue.onProcessSlot = onProcessSlot + + proc startQueue = asyncSpawn queue.start() + + teardown: + await queue.stop() + + test "activeWorkers should be 0 when not running": + check queue.activeWorkers == 0 + + test "maxWorkers cannot be 0": + expect ValueError: + discard SlotQueue.new(maxSize = 1, maxWorkers = 0) + + test "maxWorkers cannot surpass maxSize": + expect ValueError: + discard SlotQueue.new(maxSize = 1, maxWorkers = 2) + + test "does not surpass max workers": + startQueue() + let item1 = SlotQueueItem.example + let item2 = SlotQueueItem.example + let item3 = SlotQueueItem.example + let item4 = SlotQueueItem.example + check queue.push(item1).isOk + check queue.push(item2).isOk + check queue.push(item3).isOk + check queue.push(item4).isOk + check eventually queue.activeWorkers == 3 + + test "discards workers once processing completed": + proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} = + await sleepAsync(1.millis) + done.complete() + + 
queue.onProcessSlot = processSlot + + startQueue() + let item1 = SlotQueueItem.example + let item2 = SlotQueueItem.example + let item3 = SlotQueueItem.example + let item4 = SlotQueueItem.example + check queue.push(item1).isOk # finishes after 1.millis + check queue.push(item2).isOk # finishes after 1.millis + check queue.push(item3).isOk # finishes after 1.millis + check queue.push(item4).isOk + check eventually queue.activeWorkers == 1 + +suite "Slot queue": + + var onProcessSlotCalled = false + var onProcessSlotCalledWith: seq[(RequestId, uint16)] + var queue: SlotQueue + var paused: bool + + proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) = + queue = SlotQueue.new(maxWorkers, maxSize.uint16) + queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + await sleepAsync(processSlotDelay) + onProcessSlotCalled = true + onProcessSlotCalledWith.add (item.requestId, item.slotIndex) + done.complete() + asyncSpawn queue.start() + + setup: + onProcessSlotCalled = false + onProcessSlotCalledWith = @[] + + teardown: + paused = false + + await queue.stop() + + test "starts out empty": + newSlotQueue(maxSize = 2, maxWorkers = 2) + check queue.len == 0 + check $queue == "[]" + + test "reports correct size": + newSlotQueue(maxSize = 2, maxWorkers = 2) + check queue.size == 2 + + test "correctly compares SlotQueueItems": + var requestA = StorageRequest.example + requestA.ask.duration = 1.u256 + requestA.ask.reward = 1.u256 + check requestA.ask.pricePerSlot == 1.u256 + requestA.ask.collateral = 100000.u256 + requestA.expiry = 1001.u256 + + var requestB = StorageRequest.example + requestB.ask.duration = 100.u256 + requestB.ask.reward = 1000.u256 + check requestB.ask.pricePerSlot == 100000.u256 + requestB.ask.collateral = 1.u256 + requestB.expiry = 1000.u256 + + let itemA = SlotQueueItem.init(requestA, 0) + let itemB = SlotQueueItem.init(requestB, 0) + check itemB < itemA # B higher priority than A + check itemA > itemB + + test 
"correct prioritizes SlotQueueItems based on 'seen'": + let request = StorageRequest.example + let itemA = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, + duration: 1.u256, + reward: 2.u256, # profitability is higher (good) + collateral: 1.u256, + expiry: 1.u256, + seen: true # seen (bad), more weight than profitability + ) + let itemB = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, + duration: 1.u256, + reward: 1.u256, # profitability is lower (bad) + collateral: 1.u256, + expiry: 1.u256, + seen: false # not seen (good) + ) + check itemB.toSlotQueueItem < itemA.toSlotQueueItem # B higher priority than A + check itemA.toSlotQueueItem > itemB.toSlotQueueItem + + test "correct prioritizes SlotQueueItems based on profitability": + let request = StorageRequest.example + let itemA = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, + duration: 1.u256, + reward: 1.u256, # reward is lower (bad) + collateral: 1.u256, # collateral is lower (good) + expiry: 1.u256, + seen: false + ) + let itemB = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, + duration: 1.u256, + reward: 2.u256, # reward is higher (good), more weight than collateral + collateral: 2.u256, # collateral is higher (bad) + expiry: 1.u256, + seen: false + ) + + check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority + + test "correct prioritizes SlotQueueItems based on collateral": + let request = StorageRequest.example + let itemA = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, + duration: 1.u256, + reward: 1.u256, + collateral: 2.u256, # collateral is higher (bad) + expiry: 2.u256, # expiry is longer (good) + seen: false + ) + let itemB = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, + duration: 1.u256, + reward: 1.u256, + collateral: 1.u256, # collateral is lower (good), more weight than 
expiry + expiry: 1.u256, # expiry is shorter (bad) + seen: false + ) + + check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority + + test "correct prioritizes SlotQueueItems based on expiry": + let request = StorageRequest.example + let itemA = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, # slotSize is smaller (good) + duration: 1.u256, + reward: 1.u256, + collateral: 1.u256, + expiry: 1.u256, # expiry is shorter (bad) + seen: false + ) + let itemB = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 2.u256, # slotSize is larger (bad) + duration: 1.u256, + reward: 1.u256, + collateral: 1.u256, + expiry: 2.u256, # expiry is longer (good), more weight than slotSize + seen: false + ) + + check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority + + test "correct prioritizes SlotQueueItems based on slotSize": + let request = StorageRequest.example + let itemA = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 2.u256, # slotSize is larger (bad) + duration: 1.u256, + reward: 1.u256, + collateral: 1.u256, + expiry: 1.u256, # expiry is shorter (bad) + seen: false + ) + let itemB = MockSlotQueueItem( + requestId: request.id, + slotIndex: 0, + slotSize: 1.u256, # slotSize is smaller (good) + duration: 1.u256, + reward: 1.u256, + collateral: 1.u256, + expiry: 1.u256, + seen: false + ) + + check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority + + test "expands available all possible slot indices on init": + let request = StorageRequest.example + let items = SlotQueueItem.init(request) + check items.len.uint64 == request.ask.slots + var checked = 0 + for slotIndex in 0'u16.. 
0): + let blk = bt.Block.new(chunk).tryGet() + discard await store.putBlock(blk) + blk + +proc createProtectedManifest*( + datasetBlocks: seq[bt.Block], + store: BlockStore, + numDatasetBlocks: int, + ecK: int, ecM: int, + blockSize: NBytes, + originalDatasetSize: int, + totalDatasetSize: int): + Future[tuple[manifest: Manifest, protected: Manifest]] {.async.} = + + let + cids = datasetBlocks.mapIt(it.cid) + datasetTree = CodexTree.init(cids[0.. " & $expected & ")": + let slotBlockIndex = toBlkInSlot(input, numCells = cellsPerBlock) + + check: + slotBlockIndex == expected + + for (input, expected) in [(10, 10), (31, 31), (32, 0), (63, 31), (64, 0)]: + test "Can get blockCellIndex from slotCellIndex (" & $input & " -> " & $expected & ")": + let blockCellIndex = toCellInBlk(input, numCells = cellsPerBlock) + + check: + blockCellIndex == expected diff --git a/tests/codex/slots/testbackends.nim b/tests/codex/slots/testbackends.nim new file mode 100644 index 00000000..b9994fcd --- /dev/null +++ b/tests/codex/slots/testbackends.nim @@ -0,0 +1,3 @@ +import ./backends/testcircomcompat + +{.warning[UnusedImport]: off.} diff --git a/tests/codex/slots/testconverters.nim b/tests/codex/slots/testconverters.nim new file mode 100644 index 00000000..cf18d6b2 --- /dev/null +++ b/tests/codex/slots/testconverters.nim @@ -0,0 +1,47 @@ +import pkg/chronos +import pkg/poseidon2 +import pkg/poseidon2/io +import pkg/constantine/math/io/io_fields +import pkg/questionable/results +import pkg/codex/merkletree +import pkg/codex/slots/converters + +import ../../asynctest +import ../examples +import ../merkletree/helpers + +let + hash: Poseidon2Hash = toF(12345) + +suite "Converters": + test "CellBlock cid": + let + cid = toCellCid(hash).tryGet() + value = fromCellCid(cid).tryGet() + + check: + hash.toDecimal() == value.toDecimal() + + test "Slot cid": + let + cid = toSlotCid(hash).tryGet() + value = fromSlotCid(cid).tryGet() + + check: + hash.toDecimal() == value.toDecimal() + + test "Verify 
cid": + let + cid = toVerifyCid(hash).tryGet() + value = fromVerifyCid(cid).tryGet() + + check: + hash.toDecimal() == value.toDecimal() + + test "Proof": + let + codexProof = toEncodableProof(Poseidon2Proof.example).tryGet() + poseidonProof = toVerifiableProof(codexProof).tryGet() + + check: + Poseidon2Proof.example == poseidonProof diff --git a/tests/codex/slots/testprover.nim b/tests/codex/slots/testprover.nim new file mode 100644 index 00000000..6acc8602 --- /dev/null +++ b/tests/codex/slots/testprover.nim @@ -0,0 +1,92 @@ +import std/sequtils +import std/sugar +import std/math + +import ../../asynctest + +import pkg/chronos +import pkg/libp2p/cid +import pkg/datastore + +import pkg/codex/merkletree +import pkg/codex/rng +import pkg/codex/manifest +import pkg/codex/chunker +import pkg/codex/blocktype as bt +import pkg/codex/slots +import pkg/codex/stores +import pkg/poseidon2/io +import pkg/codex/utils/poseidon2digest + +import ./helpers +import ../helpers +import ./backends/helpers + +suite "Test Prover": + let + samples = 5 + blockSize = DefaultBlockSize + cellSize = DefaultCellSize + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + r1cs = "tests/circuits/fixtures/proof_main.r1cs" + wasm = "tests/circuits/fixtures/proof_main.wasm" + circomBackend = CircomCompat.init(r1cs, wasm) + challenge = 1234567.toF.toBytes.toArray32 + + var + store: BlockStore + prover: Prover + + setup: + let + repoDs = repoTmp.newDb() + metaDs = metaTmp.newDb() + + store = RepoStore.new(repoDs, metaDs) + prover = Prover.new(store, circomBackend, samples) + + teardown: + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + test "Should sample and prove a slot": + let + (_, _, verifiable) = + await createVerifiableManifest( + store, + 8, # number of blocks in the original dataset (before EC) + 5, # ecK + 3, # ecM + blockSize, + cellSize) + + let + (inputs, proof) = ( + await prover.prove(1, verifiable, challenge)).tryGet + + check: + (await prover.verify(proof, 
inputs)).tryGet == true + + test "Should generate valid proofs when slots consist of single blocks": + + # To get single-block slots, we just need to set the number of blocks in + # the original dataset to be the same as ecK. The total number of blocks + # after generating random data for parity will be ecK + ecM, which will + # match the number of slots. + let + (_, _, verifiable) = + await createVerifiableManifest( + store, + 2, # number of blocks in the original dataset (before EC) + 2, # ecK + 1, # ecM + blockSize, + cellSize) + + let + (inputs, proof) = ( + await prover.prove(1, verifiable, challenge)).tryGet + + check: + (await prover.verify(proof, inputs)).tryGet == true diff --git a/tests/codex/slots/testsampler.nim b/tests/codex/slots/testsampler.nim new file mode 100644 index 00000000..50a40c2c --- /dev/null +++ b/tests/codex/slots/testsampler.nim @@ -0,0 +1,4 @@ +import ./sampler/testsampler +import ./sampler/testutils + +{.warning[UnusedImport]: off.} diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim new file mode 100644 index 00000000..583e6d38 --- /dev/null +++ b/tests/codex/slots/testslotbuilder.nim @@ -0,0 +1,373 @@ +import std/sequtils +import std/math +import std/importutils +import std/sugar + +import ../../asynctest + +import pkg/chronos +import pkg/questionable/results +import pkg/codex/blocktype as bt +import pkg/codex/rng +import pkg/codex/stores +import pkg/codex/chunker +import pkg/codex/merkletree +import pkg/codex/manifest {.all.} +import pkg/codex/utils +import pkg/codex/utils/digest +import pkg/codex/utils/poseidon2digest +import pkg/datastore +import pkg/poseidon2 +import pkg/poseidon2/io +import pkg/constantine/math/io/io_fields + +import ./helpers +import ../helpers +import ../examples +import ../merkletree/helpers + +import pkg/codex/indexingstrategy {.all.} +import pkg/codex/slots {.all.} + +privateAccess(Poseidon2Builder) # enable access to private fields +privateAccess(Manifest) # enable 
access to private fields + +const + Strategy = SteppedStrategy + +suite "Slot builder": + let + blockSize = NBytes 1024 + cellSize = NBytes 64 + ecK = 3 + ecM = 2 + + numSlots = ecK + ecM + numDatasetBlocks = 8 + numTotalBlocks = calcEcBlocksCount(numDatasetBlocks, ecK, ecM) # total number of blocks in the dataset after + # EC (should will match number of slots) + originalDatasetSize = numDatasetBlocks * blockSize.int + totalDatasetSize = numTotalBlocks * blockSize.int + + numSlotBlocks = numTotalBlocks div numSlots + numBlockCells = (blockSize div cellSize).int # number of cells per block + numSlotCells = numSlotBlocks * numBlockCells # number of uncorrected slot cells + pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot + numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # pow2 blocks per slot + + numSlotBlocksTotal = # pad blocks per slot + if numPadSlotBlocks > 0: + numPadSlotBlocks + numSlotBlocks + else: + numSlotBlocks + + numBlocksTotal = numSlotBlocksTotal * numSlots + + # empty digest + emptyDigest = SpongeMerkle.digest(newSeq[byte](blockSize.int), cellSize.int) + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + + var + datasetBlocks: seq[bt.Block] + padBlocks: seq[bt.Block] + localStore: BlockStore + manifest: Manifest + protectedManifest: Manifest + builder: Poseidon2Builder + chunker: Chunker + + setup: + let + repoDs = repoTmp.newDb() + metaDs = metaTmp.newDb() + + localStore = RepoStore.new(repoDs, metaDs) + chunker = RandomChunker.new(Rng.instance(), size = totalDatasetSize, chunkSize = blockSize) + datasetBlocks = await chunker.createBlocks(localStore) + + (manifest, protectedManifest) = + await createProtectedManifest( + datasetBlocks, + localStore, + numDatasetBlocks, + ecK, ecM, + blockSize, + originalDatasetSize, + totalDatasetSize) + + teardown: + await localStore.close() + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + # TODO: THIS IS A BUG IN asynctest, because it doesn't release the + 
# objects after the test is done, so we need to do it manually + # + # Need to reset all objects because otherwise they get + # captured by the test runner closures, not good! + reset(datasetBlocks) + reset(localStore) + reset(manifest) + reset(protectedManifest) + reset(builder) + reset(chunker) + + test "Can only create builder with protected manifest": + let + unprotectedManifest = Manifest.new( + treeCid = Cid.example, + blockSize = blockSize.NBytes, + datasetSize = originalDatasetSize.NBytes) + + check: + Poseidon2Builder.new(localStore, unprotectedManifest, cellSize = cellSize) + .error.msg == "Manifest is not protected." + + test "Number of blocks must be devisable by number of slots": + let + mismatchManifest = Manifest.new( + manifest = Manifest.new( + treeCid = Cid.example, + blockSize = blockSize.NBytes, + datasetSize = originalDatasetSize.NBytes), + treeCid = Cid.example, + datasetSize = totalDatasetSize.NBytes, + ecK = ecK - 1, + ecM = ecM, + strategy = Strategy) + + check: + Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize) + .error.msg == "Number of blocks must be divisable by number of slots." + + test "Block size must be divisable by cell size": + let + mismatchManifest = Manifest.new( + manifest = Manifest.new( + treeCid = Cid.example, + blockSize = (blockSize + 1).NBytes, + datasetSize = (originalDatasetSize - 1).NBytes), + treeCid = Cid.example, + datasetSize = (totalDatasetSize - 1).NBytes, + ecK = ecK, + ecM = ecM, + strategy = Strategy) + + check: + Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize) + .error.msg == "Block size must be divisable by cell size." 
+ + test "Should build correct slot builder": + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + check: + builder.cellSize == cellSize + builder.numSlots == numSlots + builder.numBlockCells == numBlockCells + builder.numSlotBlocks == numSlotBlocksTotal + builder.numSlotCells == pow2SlotCells + builder.numBlocks == numBlocksTotal + + test "Should build slot hashes for all slots": + let + steppedStrategy = Strategy.init( + 0, numBlocksTotal - 1, numSlots) + + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + for i in 0.. (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + + cellHashes = (await builder.getCellHashes(i)).tryGet() + + check: + cellHashes.len == expectedHashes.len + cellHashes == expectedHashes + + test "Should build slot trees for all slots": + let + steppedStrategy = Strategy.init( + 0, numBlocksTotal - 1, numSlots) + + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + for i in 0.. (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + + expectedRoot = Merkle.digest(expectedHashes) + slotTree = (await builder.buildSlotTree(i)).tryGet() + + check: + slotTree.root().tryGet() == expectedRoot + + test "Should persist trees for all slots": + let + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + for i in 0.. 
(protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + + Merkle.digest(slotHashes) + + expectedRoot = Merkle.digest(slotsHashes) + rootHash = builder.buildVerifyTree(builder.slotRoots).tryGet().root.tryGet() + + check: + expectedRoot == rootHash + + test "Should build correct verification root manifest": + let + steppedStrategy = Strategy.init(0, numBlocksTotal - 1, numSlots) + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + slotsHashes = collect(newSeq): + for i in 0.. (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + + Merkle.digest(slotHashes) + + expectedRoot = Merkle.digest(slotsHashes) + manifest = (await builder.buildManifest()).tryGet() + mhash = manifest.verifyRoot.mhash.tryGet() + mhashBytes = mhash.digestBytes + rootHash = Poseidon2Hash.fromBytes(mhashBytes.toArray32).get + + check: + expectedRoot == rootHash + + test "Should not build from verifiable manifest with 0 slots": + var + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + verifyManifest = (await builder.buildManifest()).tryGet() + + verifyManifest.slotRoots = @[] + check Poseidon2Builder.new( + localStore, + verifyManifest, + cellSize = cellSize).isErr + + test "Should not build from verifiable manifest with incorrect number of slots": + var + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + verifyManifest = (await builder.buildManifest()).tryGet() + + verifyManifest.slotRoots.del( + verifyManifest.slotRoots.len - 1 + ) + + check Poseidon2Builder.new( + localStore, + verifyManifest, + cellSize = cellSize).isErr + + test "Should not build from verifiable manifest with invalid verify root": + let + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + 
var + verifyManifest = (await builder.buildManifest()).tryGet() + + rng.shuffle( + Rng.instance, + verifyManifest.verifyRoot.data.buffer) + + check Poseidon2Builder.new( + localStore, + verifyManifest, + cellSize = cellSize).isErr + + test "Should build from verifiable manifest": + let + builder = Poseidon2Builder.new( + localStore, + protectedManifest, + cellSize = cellSize).tryGet() + + verifyManifest = (await builder.buildManifest()).tryGet() + + verificationBuilder = Poseidon2Builder.new( + localStore, + verifyManifest, + cellSize = cellSize).tryGet() + + check: + builder.slotRoots == verificationBuilder.slotRoots + builder.verifyRoot == verificationBuilder.verifyRoot diff --git a/tests/codex/storageproofs/testnetwork.nim b/tests/codex/storageproofs/testnetwork.nim deleted file mode 100644 index 3d31d7c0..00000000 --- a/tests/codex/storageproofs/testnetwork.nim +++ /dev/null @@ -1,124 +0,0 @@ -import std/os -import std/sequtils - -import pkg/asynctest -import pkg/chronos -import pkg/libp2p -import pkg/libp2p/errors -import pkg/contractabi as ca - -import pkg/codex/rng -import pkg/codex/chunker -import pkg/codex/storageproofs -import pkg/codex/discovery -import pkg/codex/manifest -import pkg/codex/stores -import pkg/codex/storageproofs as st -import pkg/codex/blocktype as bt -import pkg/codex/streams - -import ../examples -import ../helpers - -const - BlockSize = 31 * 64 - SectorSize = 31 - SectorsPerBlock = BlockSize div SectorSize - DataSetSize = BlockSize * 100 - -suite "Storage Proofs Network": - let - rng = Rng.instance() - seckey1 = PrivateKey.random(rng[]).tryGet() - seckey2 = PrivateKey.random(rng[]).tryGet() - hostAddr1 = ca.Address.example - hostAddr2 = ca.Address.example - blocks = toSeq([1, 5, 10, 14, 20, 12, 22]) # TODO: maybe make them random - - var - stpNetwork1: StpNetwork - stpNetwork2: StpNetwork - switch1: Switch - switch2: Switch - discovery1: MockDiscovery - discovery2: MockDiscovery - - chunker: RandomChunker - manifest: Manifest - store: 
BlockStore - ssk: st.SecretKey - spk: st.PublicKey - stpstore: st.StpStore - porMsg: PorMessage - cid: Cid - por: PoR - tags: seq[Tag] - - setupAll: - chunker = RandomChunker.new(Rng.instance(), size = DataSetSize, chunkSize = BlockSize) - store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize) - manifest = Manifest.new(blockSize = BlockSize).tryGet() - (spk, ssk) = st.keyGen() - - while ( - let chunk = await chunker.getBytes(); - chunk.len > 0): - - let blk = bt.Block.new(chunk).tryGet() - manifest.add(blk.cid) - (await store.putBlock(blk)).tryGet() - - cid = manifest.cid.tryGet() - por = await PoR.init( - StoreStream.new(store, manifest), - ssk, spk, - BlockSize) - - porMsg = por.toMessage() - tags = blocks.mapIt( - Tag(idx: it, tag: porMsg.authenticators[it]) ) - - setup: - switch1 = newStandardSwitch() - switch2 = newStandardSwitch() - - discovery1 = MockDiscovery.new() - discovery2 = MockDiscovery.new() - - stpNetwork1 = StpNetwork.new(switch1, discovery1) - stpNetwork2 = StpNetwork.new(switch2, discovery2) - - switch1.mount(stpNetwork1) - switch2.mount(stpNetwork2) - - await switch1.start() - await switch2.start() - - teardown: - await switch1.stop() - await switch2.stop() - - test "Should upload to host": - var - done = newFuture[void]() - - discovery1.findHostProvidersHandler = proc(d: MockDiscovery, host: ca.Address): - Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - check hostAddr2 == host - return @[switch2.peerInfo.signedPeerRecord] - - proc tagsHandler(msg: TagsMessage) {.async, gcsafe.} = - check: - Cid.init(msg.cid).tryGet() == cid - msg.tags == tags - - done.complete() - - stpNetwork2.tagsHandle = tagsHandler - (await stpNetwork1.uploadTags( - cid, - blocks, - porMsg.authenticators, - hostAddr2)).tryGet() - - await done.wait(1.seconds) diff --git a/tests/codex/storageproofs/testpor.nim b/tests/codex/storageproofs/testpor.nim deleted file mode 100644 index e1527aff..00000000 --- a/tests/codex/storageproofs/testpor.nim +++ /dev/null 
@@ -1,160 +0,0 @@ -import pkg/chronos -import pkg/asynctest - -import pkg/blscurve/blst/blst_abi - -import pkg/codex/streams -import pkg/codex/storageproofs as st -import pkg/codex/stores -import pkg/codex/manifest -import pkg/codex/chunker -import pkg/codex/rng -import pkg/codex/blocktype as bt - -import ../helpers - -const - BlockSize = 31 * 4 - SectorSize = 31 - SectorsPerBlock = BlockSize div SectorSize - DataSetSize = BlockSize * 100 - -suite "BLS PoR": - var - chunker: RandomChunker - manifest: Manifest - store: BlockStore - ssk: st.SecretKey - spk: st.PublicKey - - setup: - chunker = RandomChunker.new(Rng.instance(), size = DataSetSize, chunkSize = BlockSize) - store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize) - manifest = Manifest.new(blockSize = BlockSize).tryGet() - (spk, ssk) = st.keyGen() - - while ( - let chunk = await chunker.getBytes(); - chunk.len > 0): - - let blk = bt.Block.new(chunk).tryGet() - manifest.add(blk.cid) - (await store.putBlock(blk)).tryGet() - - test "Test PoR without corruption": - let - por = await PoR.init( - StoreStream.new(store, manifest), - ssk, - spk, - BlockSize) - q = generateQuery(por.tau, 22) - proof = await generateProof( - StoreStream.new(store, manifest), - q, - por.authenticators, - SectorsPerBlock) - - check por.verifyProof(q, proof.mu, proof.sigma) - - test "Test PoR with corruption - query: 22, corrupted blocks: 300, bytes: 10": - let - por = await PoR.init( - StoreStream.new(store, manifest), - ssk, - spk, - BlockSize) - pos = await store.corruptBlocks(manifest, 30, 10) - q = generateQuery(por.tau, 22) - proof = await generateProof( - StoreStream.new(store, manifest), - q, - por.authenticators, - SectorsPerBlock) - - check pos.len == 30 - check not por.verifyProof(q, proof.mu, proof.sigma) - -suite "Test Serialization": - var - chunker: RandomChunker - manifest: Manifest - store: BlockStore - ssk: st.SecretKey - spk: st.PublicKey - por: PoR - q: seq[QElement] - proof: Proof - - setupAll: - 
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize, chunkSize = BlockSize) - store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize) - manifest = Manifest.new(blockSize = BlockSize).tryGet() - - while ( - let chunk = await chunker.getBytes(); - chunk.len > 0): - - let blk = bt.Block.new(chunk).tryGet() - manifest.add(blk.cid) - (await store.putBlock(blk)).tryGet() - - (spk, ssk) = st.keyGen() - por = await PoR.init( - StoreStream.new(store, manifest), - ssk, - spk, - BlockSize) - q = generateQuery(por.tau, 22) - proof = await generateProof( - StoreStream.new(store, manifest), - q, - por.authenticators, - SectorsPerBlock) - - test "Serialize Public Key": - var - spkMessage = spk.toMessage() - - check: - spk.signkey == spkMessage.fromMessage().tryGet().signkey - spk.key.blst_p2_is_equal(spkMessage.fromMessage().tryGet().key).bool - - test "Serialize TauZero": - var - tauZeroMessage = por.tau.t.toMessage() - tauZero = tauZeroMessage.fromMessage().tryGet() - - check: - por.tau.t.name == tauZero.name - por.tau.t.n == tauZero.n - - for i in 0.. 
0): - - let blk = bt.Block.new(chunk).tryGet() - manifest.add(blk.cid) - (await store.putBlock(blk)).tryGet() - - cid = manifest.cid.tryGet() - por = await PoR.init( - StoreStream.new(store, manifest), - ssk, spk, - BlockSize) - - porMsg = por.toMessage() - tags = blocks.mapIt( - Tag(idx: it, tag: porMsg.authenticators[it]) ) - - repoDir = getAppDir() / "stp" - createDir(repoDir) - stpstore = st.StpStore.init(repoDir) - - teardownAll: - removeDir(repoDir) - - test "Should store Storage Proofs": - check (await stpstore.store(por.toMessage(), cid)).isOk - check fileExists(stpstore.stpPath(cid) / "por") - - test "Should retrieve Storage Proofs": - check (await stpstore.retrieve(cid)).tryGet() == porMsg - - test "Should store tags": - check (await stpstore.store(tags, cid)).isOk - for t in tags: - check fileExists(stpstore.stpPath(cid) / $t.idx ) - - test "Should retrieve tags": - check (await stpstore.retrieve(cid, blocks)).tryGet() == tags diff --git a/tests/codex/stores/commonstoretests.nim b/tests/codex/stores/commonstoretests.nim new file mode 100644 index 00000000..863b59d4 --- /dev/null +++ b/tests/codex/stores/commonstoretests.nim @@ -0,0 +1,155 @@ +import std/sequtils +import std/strutils +import std/options + +import pkg/chronos +import pkg/libp2p/multicodec +import pkg/stew/byteutils +import pkg/questionable +import pkg/questionable/results +import pkg/codex/stores/cachestore +import pkg/codex/chunker +import pkg/codex/manifest +import pkg/codex/merkletree +import pkg/codex/utils + +import ../../asynctest +import ../helpers + +type + StoreProvider* = proc(): BlockStore {.gcsafe.} + Before* = proc(): Future[void] {.gcsafe.} + After* = proc(): Future[void] {.gcsafe.} + +proc commonBlockStoreTests*(name: string, + provider: StoreProvider, + before: Before = nil, + after: After = nil) = + + asyncchecksuite name & " Store Common": + var + newBlock, newBlock1, newBlock2, newBlock3: Block + manifest: Manifest + tree: CodexTree + store: BlockStore + + setup: + 
newBlock = Block.new("New Kids on the Block".toBytes()).tryGet() + newBlock1 = Block.new("1".repeat(100).toBytes()).tryGet() + newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet() + newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet() + + (manifest, tree) = makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() + + if not isNil(before): + await before() + + store = provider() + + teardown: + await store.close() + + if not isNil(after): + await after() + + test "putBlock": + (await store.putBlock(newBlock1)).tryGet() + check (await store.hasBlock(newBlock1.cid)).tryGet() + + test "getBlock": + (await store.putBlock(newBlock)).tryGet() + let blk = await store.getBlock(newBlock.cid) + check blk.tryGet() == newBlock + + test "fail getBlock": + expect BlockNotFoundError: + discard (await store.getBlock(newBlock.cid)).tryGet() + + test "hasBlock": + (await store.putBlock(newBlock)).tryGet() + + check: + (await store.hasBlock(newBlock.cid)).tryGet() + await newBlock.cid in store + + test "fail hasBlock": + check: + not (await store.hasBlock(newBlock.cid)).tryGet() + not (await newBlock.cid in store) + + test "delBlock": + (await store.putBlock(newBlock1)).tryGet() + check (await store.hasBlock(newBlock1.cid)).tryGet() + + (await store.delBlock(newBlock1.cid)).tryGet() + + check not (await store.hasBlock(newBlock1.cid)).tryGet() + + test "listBlocks Blocks": + let + blocks = @[newBlock1, newBlock2, newBlock3] + + putHandles = await allFinished( + blocks.mapIt( store.putBlock( it ) )) + + for handle in putHandles: + check not handle.failed + check handle.read.isOk + + let + cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet() + + var count = 0 + for c in cids: + if cid =? 
await c: + check (await store.hasBlock(cid)).tryGet() + count.inc + + check count == 3 + + test "listBlocks Manifest": + let + blocks = @[newBlock1, newBlock2, newBlock3] + manifestBlock = Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + treeBlock = Block.new(tree.encode()).tryGet() + putHandles = await allFinished( + (@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) )) + + for handle in putHandles: + check not handle.failed + check handle.read.isOk + + let + cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet() + + var count = 0 + for c in cids: + if cid =? (await c): + check manifestBlock.cid == cid + check (await store.hasBlock(cid)).tryGet() + count.inc + + check count == 1 + + test "listBlocks Both": + let + blocks = @[newBlock1, newBlock2, newBlock3] + manifestBlock = Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + treeBlock = Block.new(tree.encode()).tryGet() + putHandles = await allFinished( + (@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) )) + + for handle in putHandles: + check not handle.failed + check handle.read.isOk + + let + cids = (await store.listBlocks(blockType = BlockType.Both)).tryGet() + + var count = 0 + for c in cids: + if cid =? 
(await c): + check (await store.hasBlock(cid)).tryGet() + count.inc + + check count == 5 diff --git a/tests/codex/stores/repostore/testcoders.nim b/tests/codex/stores/repostore/testcoders.nim new file mode 100644 index 00000000..47cf4097 --- /dev/null +++ b/tests/codex/stores/repostore/testcoders.nim @@ -0,0 +1,71 @@ +import std/unittest +import std/random + +import pkg/stew/objects +import pkg/questionable +import pkg/questionable/results + +import pkg/codex/clock +import pkg/codex/stores/repostore/types +import pkg/codex/stores/repostore/coders + +import ../../helpers + +checksuite "Test coders": + + proc rand(T: type NBytes): T = + rand(Natural).NBytes + + proc rand(E: type[enum]): E = + let ordinals = enumRangeInt64(E) + E(ordinals[rand(ordinals.len - 1)]) + + proc rand(T: type QuotaUsage): T = + QuotaUsage( + used: rand(NBytes), + reserved: rand(NBytes) + ) + + proc rand(T: type BlockMetadata): T = + BlockMetadata( + expiry: rand(SecondsSince1970), + size: rand(NBytes), + refCount: rand(Natural) + ) + + proc rand(T: type DeleteResult): T = + DeleteResult( + kind: rand(DeleteResultKind), + released: rand(NBytes) + ) + + proc rand(T: type StoreResult): T = + StoreResult( + kind: rand(StoreResultKind), + used: rand(NBytes) + ) + + test "Natural encode/decode": + for val in newSeqWith[Natural](100, rand(Natural)) & @[Natural.low, Natural.high]: + check: + success(val) == Natural.decode(encode(val)) + + test "QuotaUsage encode/decode": + for val in newSeqWith[QuotaUsage](100, rand(QuotaUsage)): + check: + success(val) == QuotaUsage.decode(encode(val)) + + test "BlockMetadata encode/decode": + for val in newSeqWith[BlockMetadata](100, rand(BlockMetadata)): + check: + success(val) == BlockMetadata.decode(encode(val)) + + test "DeleteResult encode/decode": + for val in newSeqWith[DeleteResult](100, rand(DeleteResult)): + check: + success(val) == DeleteResult.decode(encode(val)) + + test "StoreResult encode/decode": + for val in newSeqWith[StoreResult](100, 
rand(StoreResult)): + check: + success(val) == StoreResult.decode(encode(val)) diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index 1e5dc043..51c59bbf 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -1,17 +1,17 @@ import std/strutils -import std/options import pkg/chronos -import pkg/asynctest -import pkg/libp2p import pkg/stew/byteutils import pkg/questionable/results import pkg/codex/stores/cachestore import pkg/codex/chunker +import ./commonstoretests + +import ../../asynctest import ../helpers -suite "Cache Store": +checksuite "Cache Store": var newBlock, newBlock1, newBlock2, newBlock3: Block store: CacheStore @@ -29,9 +29,10 @@ suite "Cache Store": discard CacheStore.new(cacheSize = 1, chunkSize = 2) store = CacheStore.new(cacheSize = 100, chunkSize = 1) - check store.currentSize == 0 + check store.currentSize == 0'nb + store = CacheStore.new(@[newBlock1, newBlock2, newBlock3]) - check store.currentSize == 300 + check store.currentSize == 300'nb # initial cache blocks total more than cache size, currentSize should # never exceed max cache size @@ -39,7 +40,7 @@ suite "Cache Store": blocks = @[newBlock1, newBlock2, newBlock3], cacheSize = 200, chunkSize = 1) - check store.currentSize == 200 + check store.currentSize == 200'nb # cache size cannot be less than chunks size expect ValueError: @@ -48,7 +49,6 @@ suite "Cache Store": chunkSize = 100) test "putBlock": - (await store.putBlock(newBlock1)).tryGet() check (await store.hasBlock(newBlock1.cid)).tryGet() @@ -66,62 +66,8 @@ suite "Cache Store": not (await store.hasBlock(newBlock1.cid)).tryGet() (await store.hasBlock(newBlock2.cid)).tryGet() (await store.hasBlock(newBlock2.cid)).tryGet() - store.currentSize == newBlock2.data.len + newBlock3.data.len # 200 + store.currentSize.int == newBlock2.data.len + newBlock3.data.len # 200 - test "getBlock": - store = CacheStore.new(@[newBlock]) - - let blk = await 
store.getBlock(newBlock.cid) - check blk.tryGet() == newBlock - - test "fail getBlock": - let blk = await store.getBlock(newBlock.cid) - check: - blk.isErr - blk.error of BlockNotFoundError - - test "hasBlock": - let store = CacheStore.new(@[newBlock]) - check: - (await store.hasBlock(newBlock.cid)).tryGet() - await newBlock.cid in store - - test "fail hasBlock": - check: - not (await store.hasBlock(newBlock.cid)).tryGet() - not (await newBlock.cid in store) - - test "delBlock": - # empty cache - (await store.delBlock(newBlock1.cid)).tryGet() - check not (await store.hasBlock(newBlock1.cid)).tryGet() - - (await store.putBlock(newBlock1)).tryGet() - check (await store.hasBlock(newBlock1.cid)).tryGet() - - # successfully deleted - (await store.delBlock(newBlock1.cid)).tryGet() - check not (await store.hasBlock(newBlock1.cid)).tryGet() - - # deletes item should decrement size - store = CacheStore.new(@[newBlock1, newBlock2, newBlock3]) - check: - store.currentSize == 300 - - (await store.delBlock(newBlock2.cid)).tryGet() - - check: - store.currentSize == 200 - not (await store.hasBlock(newBlock2.cid)).tryGet() - - test "listBlocks": - (await store.putBlock(newBlock1)).tryGet() - - var listed = false - (await store.listBlocks( - proc(cid: Cid) {.gcsafe, async.} = - check (await store.hasBlock(cid)).tryGet() - listed = true - )).tryGet() - - check listed +commonBlockStoreTests( + "Cache", proc: BlockStore = + BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1))) diff --git a/tests/codex/stores/testfsstore.nim b/tests/codex/stores/testfsstore.nim deleted file mode 100644 index e178f9a4..00000000 --- a/tests/codex/stores/testfsstore.nim +++ /dev/null @@ -1,88 +0,0 @@ -import std/os -import std/options - -import pkg/questionable -import pkg/questionable/results - -import pkg/chronos -import pkg/asynctest -import pkg/libp2p -import pkg/stew/byteutils - -import pkg/codex/stores/cachestore -import pkg/codex/chunker -import pkg/codex/stores -import pkg/codex/blocktype 
as bt - -import ../helpers - -proc runSuite(cache: bool) = - suite "FS Store " & (if cache: "(cache enabled)" else: "(cache disabled)"): - var - store: FSStore - repoDir: string - newBlock = bt.Block.new("New Block".toBytes()).tryGet() - - setup: - repoDir = getAppDir() / "repo" - createDir(repoDir) - - if cache: - store = FSStore.new(repoDir) - else: - store = FSStore.new(repoDir, postfixLen = 2, cache = nil) - - teardown: - removeDir(repoDir) - - test "putBlock": - (await store.putBlock(newBlock)).tryGet() - check: - fileExists(store.blockPath(newBlock.cid)) - (await store.hasBlock(newBlock.cid)).tryGet() - await newBlock.cid in store - - test "getBlock": - createDir(store.blockPath(newBlock.cid).parentDir) - writeFile(store.blockPath(newBlock.cid), newBlock.data) - let blk = await store.getBlock(newBlock.cid) - check blk.tryGet() == newBlock - - test "fail getBlock": - let blk = await store.getBlock(newBlock.cid) - check: - blk.isErr - blk.error of BlockNotFoundError - - test "hasBlock": - createDir(store.blockPath(newBlock.cid).parentDir) - writeFile(store.blockPath(newBlock.cid), newBlock.data) - - check: - (await store.hasBlock(newBlock.cid)).tryGet() - await newBlock.cid in store - - test "fail hasBlock": - check: - not (await store.hasBlock(newBlock.cid)).tryGet() - not (await newBlock.cid in store) - - test "listBlocks": - createDir(store.blockPath(newBlock.cid).parentDir) - writeFile(store.blockPath(newBlock.cid), newBlock.data) - - (await store.listBlocks( - proc(cid: Cid) {.gcsafe, async.} = - check cid == newBlock.cid - )).tryGet() - - test "delBlock": - createDir(store.blockPath(newBlock.cid).parentDir) - writeFile(store.blockPath(newBlock.cid), newBlock.data) - - (await store.delBlock(newBlock.cid)).tryGet() - - check not fileExists(store.blockPath(newBlock.cid)) - -runSuite(cache = true) -runSuite(cache = false) diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim new file mode 100644 index 00000000..b885220f --- 
/dev/null +++ b/tests/codex/stores/testkeyutils.nim @@ -0,0 +1,93 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import std/random +import std/sequtils +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/codex/blocktype as bt +import pkg/codex/stores/repostore +import pkg/codex/clock + +import ../../asynctest +import ../helpers/mocktimer +import ../helpers/mockrepostore +import ../helpers/mockclock +import ../examples + +import codex/namespaces +import codex/stores/keyutils + +proc createManifestCid(): ?!Cid = + let + length = rand(4096) + bytes = newSeqWith(length, rand(uint8)) + mcodec = Sha256HashCodec + codec = ManifestCodec + version = CIDv1 + + let hash = ? MultiHash.digest($mcodec, bytes).mapFailure + let cid = ? 
Cid.init(version, codec, hash).mapFailure + return success cid + +checksuite "KeyUtils": + test "makePrefixKey should create block key": + let length = 6 + let cid = Cid.example + let expectedPrefix = ($cid)[^length..^1] + let expectedPostfix = $cid + + let key = !makePrefixKey(length, cid).option + let namespaces = key.namespaces + + check: + namespaces.len == 4 + namespaces[0].value == CodexRepoNamespace + namespaces[1].value == "blocks" + namespaces[2].value == expectedPrefix + namespaces[3].value == expectedPostfix + + test "makePrefixKey should create manifest key": + let length = 6 + let cid = !createManifestCid().option + let expectedPrefix = ($cid)[^length..^1] + let expectedPostfix = $cid + + let key = !makePrefixKey(length, cid).option + let namespaces = key.namespaces + + check: + namespaces.len == 4 + namespaces[0].value == CodexRepoNamespace + namespaces[1].value == "manifests" + namespaces[2].value == expectedPrefix + namespaces[3].value == expectedPostfix + + test "createBlockExpirationMetadataKey should create block TTL key": + let cid = Cid.example + + let key = !createBlockExpirationMetadataKey(cid).option + let namespaces = key.namespaces + + check: + namespaces.len == 3 + namespaces[0].value == CodexMetaNamespace + namespaces[1].value == "ttl" + namespaces[2].value == $cid + + test "createBlockExpirationMetadataQueryKey should create key for all block TTL entries": + let key = !createBlockExpirationMetadataQueryKey().option + let namespaces = key.namespaces + + check: + namespaces.len == 3 + namespaces[0].value == CodexMetaNamespace + namespaces[1].value == "ttl" + namespaces[2].value == "*" \ No newline at end of file diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim new file mode 100644 index 00000000..bdf48c12 --- /dev/null +++ b/tests/codex/stores/testmaintenance.nim @@ -0,0 +1,188 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache 
License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import pkg/chronos +import pkg/questionable/results +import pkg/codex/blocktype as bt +import pkg/codex/stores/repostore +import pkg/codex/clock + +import ../../asynctest +import ../helpers +import ../helpers/mocktimer +import ../helpers/mockrepostore +import ../helpers/mockclock +import ../examples + +import codex/stores/maintenance + +checksuite "BlockMaintainer": + var mockRepoStore: MockRepoStore + var interval: Duration + var mockTimer: MockTimer + var mockClock: MockClock + + var blockMaintainer: BlockMaintainer + + var testBe1: BlockExpiration + var testBe2: BlockExpiration + var testBe3: BlockExpiration + + proc createTestExpiration(expiry: SecondsSince1970): BlockExpiration = + BlockExpiration( + cid: bt.Block.example.cid, + expiry: expiry + ) + + setup: + mockClock = MockClock.new() + mockClock.set(100) + + testBe1 = createTestExpiration(200) + testBe2 = createTestExpiration(300) + testBe3 = createTestExpiration(400) + + mockRepoStore = MockRepoStore.new() + mockRepoStore.testBlockExpirations.add(testBe1) + mockRepoStore.testBlockExpirations.add(testBe2) + mockRepoStore.testBlockExpirations.add(testBe3) + + interval = 1.days + mockTimer = MockTimer.new() + + blockMaintainer = BlockMaintainer.new( + mockRepoStore, + interval, + numberOfBlocksPerInterval = 2, + mockTimer, + mockClock) + + test "Start should start timer at provided interval": + blockMaintainer.start() + check mockTimer.startCalled == 1 + check mockTimer.mockInterval == interval + + test "Stop should stop timer": + await blockMaintainer.stop() + check mockTimer.stopCalled == 1 + + test "Timer callback should call getBlockExpirations on RepoStore": + blockMaintainer.start() + await mockTimer.invokeCallback() + + check: + mockRepoStore.getBeMaxNumber == 2 + 
mockRepoStore.getBeOffset == 0 + + test "Timer callback should handle Catachable errors": + mockRepoStore.getBlockExpirationsThrows = true + blockMaintainer.start() + await mockTimer.invokeCallback() + + test "Subsequent timer callback should call getBlockExpirations on RepoStore with offset": + blockMaintainer.start() + await mockTimer.invokeCallback() + await mockTimer.invokeCallback() + + check: + mockRepoStore.getBeMaxNumber == 2 + mockRepoStore.getBeOffset == 2 + + test "Timer callback should delete no blocks if none are expired": + blockMaintainer.start() + await mockTimer.invokeCallback() + + check: + mockRepoStore.delBlockCids.len == 0 + + test "Timer callback should delete one block if it is expired": + mockClock.set(250) + blockMaintainer.start() + await mockTimer.invokeCallback() + + check: + mockRepoStore.delBlockCids == [testBe1.cid] + + test "Timer callback should delete multiple blocks if they are expired": + mockClock.set(500) + blockMaintainer.start() + await mockTimer.invokeCallback() + + check: + mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid] + + test "After deleting a block, subsequent timer callback should decrease offset by the number of deleted blocks": + mockClock.set(250) + blockMaintainer.start() + await mockTimer.invokeCallback() + + check mockRepoStore.delBlockCids == [testBe1.cid] + + # Because one block was deleted, the offset used in the next call should be 2 minus 1. 
+ await mockTimer.invokeCallback() + + check: + mockRepoStore.getBeMaxNumber == 2 + mockRepoStore.getBeOffset == 1 + + test "Should delete all blocks if expired, in two timer callbacks": + mockClock.set(500) + blockMaintainer.start() + await mockTimer.invokeCallback() + await mockTimer.invokeCallback() + + check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid] + + test "Iteration offset should loop": + blockMaintainer.start() + await mockTimer.invokeCallback() + check mockRepoStore.getBeOffset == 0 + + await mockTimer.invokeCallback() + check mockRepoStore.getBeOffset == 2 + + await mockTimer.invokeCallback() + check mockRepoStore.getBeOffset == 0 + + test "Should handle new blocks": + proc invokeTimerManyTimes(): Future[void] {.async.} = + for i in countup(0, 10): + await mockTimer.invokeCallback() + + blockMaintainer.start() + await invokeTimerManyTimes() + + # no blocks have expired + check mockRepoStore.delBlockCids == [] + + mockClock.set(250) + await invokeTimerManyTimes() + # one block has expired + check mockRepoStore.delBlockCids == [testBe1.cid] + + # new blocks are added + let testBe4 = createTestExpiration(600) + let testBe5 = createTestExpiration(700) + mockRepoStore.testBlockExpirations.add(testBe4) + mockRepoStore.testBlockExpirations.add(testBe5) + + mockClock.set(500) + await invokeTimerManyTimes() + # All blocks have expired + check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid] + + mockClock.set(650) + await invokeTimerManyTimes() + # First new block has expired + check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid] + + mockClock.set(750) + await invokeTimerManyTimes() + # Second new block has expired + check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid, testBe5.cid] diff --git a/tests/codex/stores/testqueryiterhelper.nim b/tests/codex/stores/testqueryiterhelper.nim new file mode 100644 index 00000000..ddc769c8 --- /dev/null +++ 
b/tests/codex/stores/testqueryiterhelper.nim @@ -0,0 +1,65 @@ +import std/sugar + +import pkg/stew/results +import pkg/questionable +import pkg/chronos +import pkg/datastore/typedds +import pkg/datastore/sql/sqliteds +import pkg/codex/stores/queryiterhelper +import pkg/codex/utils/asynciter + +import ../../asynctest +import ../helpers + +proc encode(s: string): seq[byte] = + s.toBytes() + +proc decode(T: type string, bytes: seq[byte]): ?!T = + success(string.fromBytes(bytes)) + +asyncchecksuite "Test QueryIter helper": + var + tds: TypedDatastore + + setupAll: + tds = TypedDatastore.init(SQLiteDatastore.new(Memory).tryGet()) + + teardownAll: + (await tds.close()).tryGet + + test "Should auto-dispose when QueryIter finishes": + let + source = { + "a": "11", + "b": "22" + }.toTable + Root = Key.init("/queryitertest").tryGet() + + for k, v in source: + let key = (Root / k).tryGet() + (await tds.put(key, v)).tryGet() + + var + disposed = false + queryIter = (await query[string](tds, Query.init(Root))).tryGet() + + let iterDispose: IterDispose = queryIter.dispose + queryIter.dispose = () => (disposed = true; iterDispose()) + + let + iter1 = (await toAsyncIter[string](queryIter)).tryGet() + iter2 = await filterSuccess[string](iter1) + + var items = initTable[string, string]() + + for fut in iter2: + let item = await fut + + items[item.key.value] = item.value + + check: + items == source + disposed == true + queryIter.finished == true + iter1.finished == true + iter2.finished == true diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim new file mode 100644 index 00000000..ecb3b75e --- /dev/null +++ b/tests/codex/stores/testrepostore.nim @@ -0,0 +1,391 @@ +import std/os +import std/strutils +import std/sequtils + +import pkg/questionable +import pkg/questionable/results + +import pkg/chronos +import pkg/stew/byteutils +import pkg/stew/endians2 +import pkg/datastore + +import pkg/codex/stores/cachestore +import pkg/codex/chunker +import 
pkg/codex/stores +import pkg/codex/blocktype as bt +import pkg/codex/clock +import pkg/codex/utils/asynciter + +import ../../asynctest +import ../helpers +import ../helpers/mockclock +import ../examples +import ./commonstoretests + +import ./repostore/testcoders + +checksuite "Test RepoStore start/stop": + + var + repoDs: Datastore + metaDs: Datastore + + setup: + repoDs = SQLiteDatastore.new(Memory).tryGet() + metaDs = SQLiteDatastore.new(Memory).tryGet() + + test "Should set started flag once started": + let repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200'nb) + await repo.start() + check repo.started + + test "Should set started flag to false once stopped": + let repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200'nb) + await repo.start() + await repo.stop() + check not repo.started + + test "Should allow start to be called multiple times": + let repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200'nb) + await repo.start() + await repo.start() + check repo.started + + test "Should allow stop to be called multiple times": + let repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200'nb) + await repo.stop() + await repo.stop() + check not repo.started + +asyncchecksuite "RepoStore": + var + repoDs: Datastore + metaDs: Datastore + mockClock: MockClock + + repo: RepoStore + + let + now: SecondsSince1970 = 123 + + setup: + repoDs = SQLiteDatastore.new(Memory).tryGet() + metaDs = SQLiteDatastore.new(Memory).tryGet() + mockClock = MockClock.new() + mockClock.set(now) + + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = 200'nb) + + teardown: + (await repoDs.close()).tryGet + (await metaDs.close()).tryGet + + proc createTestBlock(size: int): bt.Block = + bt.Block.new('a'.repeat(size).toBytes).tryGet() + + test "Should update current used bytes on block put": + let blk = createTestBlock(200) + + check repo.quotaUsedBytes == 0'nb + (await repo.putBlock(blk)).tryGet + + check: + repo.quotaUsedBytes == 200'nb + + test "Should update 
current used bytes on block delete": + let blk = createTestBlock(100) + + check repo.quotaUsedBytes == 0'nb + (await repo.putBlock(blk)).tryGet + check repo.quotaUsedBytes == 100'nb + + (await repo.delBlock(blk.cid)).tryGet + + check: + repo.quotaUsedBytes == 0'nb + + test "Should not update current used bytes if block exist": + let blk = createTestBlock(100) + + check repo.quotaUsedBytes == 0'nb + (await repo.putBlock(blk)).tryGet + check repo.quotaUsedBytes == 100'nb + + # put again + (await repo.putBlock(blk)).tryGet + check repo.quotaUsedBytes == 100'nb + + test "Should fail storing passed the quota": + let blk = createTestBlock(300) + + check repo.totalUsed == 0'nb + expect QuotaNotEnoughError: + (await repo.putBlock(blk)).tryGet + + test "Should reserve bytes": + let blk = createTestBlock(100) + + check repo.totalUsed == 0'nb + (await repo.putBlock(blk)).tryGet + check repo.totalUsed == 100'nb + + (await repo.reserve(100'nb)).tryGet + + check: + repo.totalUsed == 200'nb + repo.quotaUsedBytes == 100'nb + repo.quotaReservedBytes == 100'nb + + test "Should not reserve bytes over max quota": + let blk = createTestBlock(100) + + check repo.totalUsed == 0'nb + (await repo.putBlock(blk)).tryGet + check repo.totalUsed == 100'nb + + expect QuotaNotEnoughError: + (await repo.reserve(101'nb)).tryGet + + check: + repo.totalUsed == 100'nb + repo.quotaUsedBytes == 100'nb + repo.quotaReservedBytes == 0'nb + + test "Should release bytes": + discard createTestBlock(100) + + check repo.totalUsed == 0'nb + (await repo.reserve(100'nb)).tryGet + check repo.totalUsed == 100'nb + + (await repo.release(100'nb)).tryGet + + check: + repo.totalUsed == 0'nb + repo.quotaUsedBytes == 0'nb + repo.quotaReservedBytes == 0'nb + + test "Should not release bytes less than quota": + check repo.totalUsed == 0'nb + (await repo.reserve(100'nb)).tryGet + check repo.totalUsed == 100'nb + + expect RangeDefect: + (await repo.release(101'nb)).tryGet + + check: + repo.totalUsed == 100'nb + 
repo.quotaUsedBytes == 0'nb + repo.quotaReservedBytes == 100'nb + + proc getExpirations(): Future[seq[BlockExpiration]] {.async.} = + let iter = (await repo.getBlockExpirations(100, 0)).tryGet() + + var res = newSeq[BlockExpiration]() + for fut in iter: + let be = await fut + res.add(be) + + res + + test "Should store block expiration timestamp": + let + duration = 10.seconds + blk = createTestBlock(100) + + let + expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) + + (await repo.putBlock(blk, duration.some)).tryGet + + let expirations = await getExpirations() + + check: + expectedExpiration in expirations + + test "Should store block with default expiration timestamp when not provided": + let + blk = createTestBlock(100) + + let + expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + DefaultBlockTtl.seconds) + + (await repo.putBlock(blk)).tryGet + + let expirations = await getExpirations() + + check: + expectedExpiration in expirations + + test "Should refuse update expiry with negative timestamp": + let + blk = createTestBlock(100) + expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) + + (await repo.putBlock(blk, some 10.seconds)).tryGet + + let expirations = await getExpirations() + + check: + expectedExpiration in expirations + + expect ValueError: + (await repo.ensureExpiry(blk.cid, -1)).tryGet + + expect ValueError: + (await repo.ensureExpiry(blk.cid, 0)).tryGet + + test "Should fail when updating expiry of non-existing block": + let + blk = createTestBlock(100) + + expect BlockNotFoundError: + (await repo.ensureExpiry(blk.cid, 10)).tryGet + + test "Should update block expiration timestamp when new expiration is farther": + let + blk = createTestBlock(100) + expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) + updatedExpectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 20) + + (await repo.putBlock(blk, some 10.seconds)).tryGet + + let expirations = await getExpirations() + + check: + 
expectedExpiration in expirations + + (await repo.ensureExpiry(blk.cid, now + 20)).tryGet + + let updatedExpirations = await getExpirations() + + check: + expectedExpiration notin updatedExpirations + updatedExpectedExpiration in updatedExpirations + + test "Should not update block expiration timestamp when current expiration is farther then new one": + let + blk = createTestBlock(100) + expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) + updatedExpectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 5) + + (await repo.putBlock(blk, some 10.seconds)).tryGet + + let expirations = await getExpirations() + + check: + expectedExpiration in expirations + + (await repo.ensureExpiry(blk.cid, now + 5)).tryGet + + let updatedExpirations = await getExpirations() + + check: + expectedExpiration in updatedExpirations + updatedExpectedExpiration notin updatedExpirations + + test "delBlock should remove expiration metadata": + let + blk = createTestBlock(100) + expectedKey = Key.init("meta/ttl/" & $blk.cid).tryGet + + (await repo.putBlock(blk, 10.seconds.some)).tryGet + (await repo.delBlock(blk.cid)).tryGet + + let expirations = await getExpirations() + + check: + expirations.len == 0 + + test "Should retrieve block expiration information": + proc unpack(beIter: Future[?!AsyncIter[BlockExpiration]]): Future[seq[BlockExpiration]] {.async.} = + var expirations = newSeq[BlockExpiration](0) + without iter =? 
(await beIter), err: + return expirations + for beFut in toSeq(iter): + let value = await beFut + expirations.add(value) + return expirations + + let + duration = 10.seconds + blk1 = createTestBlock(10) + blk2 = createTestBlock(11) + blk3 = createTestBlock(12) + + let + expectedExpiration: SecondsSince1970 = now + 10 + + proc assertExpiration(be: BlockExpiration, expectedBlock: bt.Block) = + check: + be.cid == expectedBlock.cid + be.expiry == expectedExpiration + + + (await repo.putBlock(blk1, duration.some)).tryGet + (await repo.putBlock(blk2, duration.some)).tryGet + (await repo.putBlock(blk3, duration.some)).tryGet + + let + blockExpirations1 = await unpack(repo.getBlockExpirations(maxNumber=2, offset=0)) + blockExpirations2 = await unpack(repo.getBlockExpirations(maxNumber=2, offset=2)) + + check blockExpirations1.len == 2 + assertExpiration(blockExpirations1[0], blk2) + assertExpiration(blockExpirations1[1], blk1) + + check blockExpirations2.len == 1 + assertExpiration(blockExpirations2[0], blk3) + + test "should put empty blocks": + let blk = Cid.example.emptyBlock.tryGet() + check (await repo.putBlock(blk)).isOk + + test "should get empty blocks": + let blk = Cid.example.emptyBlock.tryGet() + + let got = await repo.getBlock(blk.cid) + check got.isOk + check got.get.cid == blk.cid + + test "should delete empty blocks": + let blk = Cid.example.emptyBlock.tryGet() + check (await repo.delBlock(blk.cid)).isOk + + test "should have empty block": + let blk = Cid.example.emptyBlock.tryGet() + + let has = await repo.hasBlock(blk.cid) + check has.isOk + check has.get + +commonBlockStoreTests( + "RepoStore Sql backend", proc: BlockStore = + BlockStore( + RepoStore.new( + SQLiteDatastore.new(Memory).tryGet(), + SQLiteDatastore.new(Memory).tryGet(), + clock = MockClock.new()))) + +const + path = currentSourcePath().parentDir / "test" + +proc before() {.async.} = + createDir(path) + +proc after() {.async.} = + removeDir(path) + +let + depth = path.split(DirSep).len + 
+commonBlockStoreTests( + "RepoStore FS backend", proc: BlockStore = + BlockStore( + RepoStore.new( + FSDatastore.new(path, depth).tryGet(), + SQLiteDatastore.new(Memory).tryGet(), + clock = MockClock.new())), + before = before, + after = after) diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index d7fe3ec1..10bab65d 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -1,10 +1,12 @@ import pkg/chronos -import pkg/asynctest import pkg/stew/results import pkg/codex/utils/asyncheapqueue import pkg/codex/rng +import ../asynctest +import ./helpers + type Task* = tuple[name: string, priority: int] @@ -21,7 +23,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] = while tmp.len > 0: result.add(popNoWait(tmp).tryGet()) -suite "Synchronous tests": +checksuite "Synchronous tests": test "Test pushNoWait - Min": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] @@ -127,8 +129,7 @@ suite "Synchronous tests": heap.clear() check heap.len == 0 -suite "Asynchronous Tests": - +asyncchecksuite "Asynchronous Tests": test "Test push": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] diff --git a/tests/codex/testasyncstreamwrapper.nim b/tests/codex/testasyncstreamwrapper.nim new file mode 100644 index 00000000..8a325351 --- /dev/null +++ b/tests/codex/testasyncstreamwrapper.nim @@ -0,0 +1,90 @@ +import pkg/chronos +import pkg/chronos/transports/stream +import pkg/chronos/transports/common +import pkg/chronos/streams/asyncstream +import pkg/codex/streams +import pkg/stew/byteutils + +import ../asynctest +import ./helpers + +asyncchecksuite "AsyncStreamWrapper": + + let data = "0123456789012345678901234567890123456789" + let address = initTAddress("127.0.0.1:46001") + + proc serveReadingClient(server: StreamServer, + transp: StreamTransport) {.async.} = + var wstream = newAsyncStreamWriter(transp) + await wstream.write(data) + 
await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + + proc serveWritingClient(buf: pointer, bufLen: int): auto = + return proc(server: StreamServer, transp: StreamTransport) {.async.} = + var rstream = newAsyncStreamReader(transp) + discard await rstream.readOnce(buf, bufLen) + await rstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + + test "Read all data": + var server = createStreamServer(address, serveReadingClient, {ReuseAddr}) + server.start() + + var transp = await connect(address) + var rstream = newAsyncStreamReader(transp) + var wrapper = AsyncStreamWrapper.new(reader = rstream) + var buf = newSeq[byte](data.len) + + let readLen = (await wrapper.readOnce(addr buf[0], buf.len)) + + await wrapper.closeImpl() + await transp.closeWait() + await server.join() + + check rstream.closed() + check buf.len == readLen + check data.toBytes == buf + + test "Read not all data": + var server = createStreamServer(address, serveReadingClient, {ReuseAddr}) + server.start() + + var transp = await connect(address) + var rstream = newAsyncStreamReader(transp) + var wrapper = AsyncStreamWrapper.new(reader = rstream) + var buf = newSeq[byte](data.len div 2) + + let readLen = (await wrapper.readOnce(addr buf[0], buf.len)) + + await wrapper.close() + await transp.closeWait() + await server.join() + + check rstream.closed() + check buf.len == readLen + check data.toBytes[0 .. 
buf.len - 1] == buf + + test "Write all data": + var buf = newSeq[byte](data.len) + + var server = createStreamServer(address, serveWritingClient(addr buf[0], buf.len), {ReuseAddr}) + server.start() + + var transp = await connect(address) + var wstream = newAsyncStreamWriter(transp) + var wrapper = AsyncStreamWrapper.new(writer = wstream) + + await wrapper.write(data.toBytes()) + + await wrapper.close() + await transp.closeWait() + await server.join() + + check wstream.closed() + check data.toBytes == buf diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index 3a7f1b4f..f4f40a29 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -1,11 +1,13 @@ -import pkg/asynctest + import pkg/stew/byteutils import pkg/codex/chunker -import pkg/chronicles +import pkg/codex/logutils import pkg/chronos -import pkg/libp2p -suite "Chunking": +import ../asynctest +import ./helpers + +asyncchecksuite "Chunking": test "should return proper size chunks": var offset = 0 let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] @@ -22,7 +24,7 @@ suite "Chunking": let chunker = Chunker.new( reader = reader, - chunkSize = 2) + chunkSize = 2'nb) check: (await chunker.getBytes()) == [1.byte, 2] @@ -37,7 +39,7 @@ suite "Chunking": let stream = BufferStream.new() let chunker = LPStreamChunker.new( stream = stream, - chunkSize = 2) + chunkSize = 2'nb) proc writer() {.async.} = for d in [@[1.byte, 2, 3, 4], @[5.byte, 6, 7, 8], @[9.byte, 0]]: @@ -59,9 +61,9 @@ suite "Chunking": test "should chunk file": let - (path, _, _) = instantiationInfo(-2, fullPaths = true) # get this file's name + path = currentSourcePath() file = open(path) - fileChunker = FileChunker.new(file = file, chunkSize = 256, pad = false) + fileChunker = FileChunker.new(file = file, chunkSize = 256'nb, pad = false) var data: seq[byte] while true: @@ -69,7 +71,7 @@ suite "Chunking": if buff.len <= 0: break - check buff.len <= fileChunker.chunkSize + check buff.len <= 
fileChunker.chunkSize.int data.add(buff) check: diff --git a/tests/codex/testclock.nim b/tests/codex/testclock.nim new file mode 100644 index 00000000..513e4963 --- /dev/null +++ b/tests/codex/testclock.nim @@ -0,0 +1,26 @@ +import std/unittest + +import codex/clock +import ./helpers + +checksuite "Clock": + proc testConversion(seconds: SecondsSince1970) = + let asBytes = seconds.toBytes + + let restored = asBytes.toSecondsSince1970 + + check restored == seconds + + test "SecondsSince1970 should support bytes conversions": + let secondsToTest: seq[int64] = @[ + int64.high, + int64.low, + 0, + 1, + 12345, + -1, + -12345 + ] + + for seconds in secondsToTest: + testConversion(seconds) diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 9b50c558..3febcfa5 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -1,9 +1,9 @@ import std/sequtils +import std/sugar +import std/cpuinfo -import pkg/asynctest import pkg/chronos -import pkg/libp2p -import pkg/questionable +import pkg/datastore import pkg/questionable/results import pkg/codex/erasure @@ -11,12 +11,16 @@ import pkg/codex/manifest import pkg/codex/stores import pkg/codex/blocktype as bt import pkg/codex/rng +import pkg/codex/utils +import pkg/codex/indexingstrategy +import pkg/taskpools +import ../asynctest import ./helpers +import ./examples suite "Erasure encode/decode": - - const BlockSize = 1024 + const BlockSize = 1024'nb const dataSetSize = BlockSize * 123 # weird geometry var rng: Rng @@ -24,32 +28,35 @@ suite "Erasure encode/decode": var manifest: Manifest var store: BlockStore var erasure: Erasure + var taskpool: Taskpool + let repoTmp = TempLevelDb.new() + let metaTmp = TempLevelDb.new() setup: + let + repoDs = repoTmp.newDb() + metaDs = metaTmp.newDb() rng = Rng.instance() chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize) - manifest = !Manifest.new(blockSize = BlockSize) - store = CacheStore.new(cacheSize = (dataSetSize * 2), 
chunkSize = BlockSize) - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + store = RepoStore.new(repoDs, metaDs) + taskpool = Taskpool.new(num_threads = countProcessors()) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) + manifest = await storeDataGetManifest(store, chunker) - while ( - let chunk = await chunker.getBytes(); - chunk.len > 0): - - let blk = bt.Block.new(chunk).tryGet() - manifest.add(blk.cid) - (await store.putBlock(blk)).tryGet() + teardown: + await repoTmp.destroyDb() + await metaTmp.destroyDb() proc encode(buffers, parity: int): Future[Manifest] {.async.} = let encoded = (await erasure.encode( manifest, - buffers, - parity)).tryGet() + buffers.Natural, + parity.Natural)).tryGet() check: - encoded.len mod (buffers + parity) == 0 - encoded.rounded == (manifest.len + (buffers - (manifest.len mod buffers))) + encoded.blocksCount mod (buffers + parity) == 0 + encoded.rounded == roundUp(manifest.blocksCount, buffers) encoded.steps == encoded.rounded div buffers return encoded @@ -62,25 +69,27 @@ suite "Erasure encode/decode": let encoded = await encode(buffers, parity) var - column = rng.rand(encoded.len div encoded.steps) # random column - dropped: seq[Cid] + column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column + dropped: seq[int] - for _ in 0.. 
0 + leaves = [ + 0.toF.Poseidon2Hash, + 1.toF.Poseidon2Hash, + 2.toF.Poseidon2Hash, + 3.toF.Poseidon2Hash] - let encodedCid = Cid.init(manifest.version, manifest.codec, mh).tryGet() + slotLeavesCids = leaves.toSlotCids().tryGet + + tree = Poseidon2Tree.init(leaves).tryGet + verifyCid = tree.root.tryGet.toVerifyCid().tryGet + + verifiableManifest = Manifest.new( + manifest = protectedManifest, + verifyRoot = verifyCid, + slotRoots = slotLeavesCids + ).tryGet() + + proc encodeDecode(manifest: Manifest): Manifest = + let e = manifest.encode().tryGet() + Manifest.decode(e).tryGet() + + test "Should encode/decode to/from base manifest": check: - encodedCid == manifest.cid.tryGet() + encodeDecode(manifest) == manifest - test "Should encode/decode to/from manifest": - let - blocks = (0..<1000).mapIt( - Block.new(("Block " & $it).toBytes).tryGet().cid - ) - - var - manifest = Manifest.new(blocks).tryGet() - - let - e = manifest.encode().tryGet() - decoded = Manifest.decode(e).tryGet() + test "Should encode/decode large manifest": + let large = Manifest.new( + treeCid = Cid.example, + blockSize = (64 * 1024).NBytes, + datasetSize = (5 * 1024).MiBs + ) check: - decoded.blocks == blocks - decoded.protected == false - - test "Should produce a protected manifest": - let - blocks = (0..<333).mapIt( - Block.new(("Block " & $it).toBytes).tryGet().cid - ) - manifest = Manifest.new(blocks).tryGet() - protected = Manifest.new(manifest, 2, 2).tryGet() + encodeDecode(large) == large + test "Should encode/decode to/from protected manifest": check: - protected.originalCid == manifest.cid.tryGet() - protected.blocks[0..<333] == manifest.blocks - protected.protected == true - protected.originalLen == manifest.len - - # fill up with empty Cid's - for i in protected.rounded.. 
0): - - let blk = bt.Block.new(chunk).tryGet() - (await localStore.putBlock(blk)).tryGet() - manifest.add(blk.cid) - - return manifest - - proc retrieve(cid: Cid): Future[seq[byte]] {.async.} = - # Retrieve an entire file contents by file Cid - let - oddChunkSize = math.trunc(BlockSize/1.359).int # Let's check that node.retrieve can correctly rechunk data - stream = (await node.retrieve(cid)).tryGet() - var - data: seq[byte] - - while not stream.atEof: - var - buf = newSeq[byte](oddChunkSize) - res = await stream.readOnce(addr buf[0], oddChunkSize) - check res <= oddChunkSize - buf.setLen(res) - data &= buf - - return data - - setup: - file = open(path.splitFile().dir /../ "fixtures" / "test.jpg") - chunker = FileChunker.new(file = file, chunkSize = BlockSize) - switch = newStandardSwitch() - wallet = WalletRef.new(EthPrivateKey.random()) - network = BlockExcNetwork.new(switch) - localStore = CacheStore.new() - blockDiscovery = Discovery.new( - switch.peerInfo.privateKey, - announceAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0") - .expect("Should return multiaddress")]) - peerStore = PeerCtxStore.new() - pendingBlocks = PendingBlocksManager.new() - discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) - engine = BlockExcEngine.new(localStore, wallet, network, discovery, peerStore, pendingBlocks) - store = NetworkStore.new(engine, localStore) - node = CodexNodeRef.new(switch, store, engine, nil, blockDiscovery) # TODO: pass `Erasure` - - await node.start() - - teardown: - close(file) - await node.stop() - - test "Fetch Manifest": - let - manifest = await Manifest.fetch(chunker) - - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), - codec = DagPBCodec - ).tryGet() - - (await localStore.putBlock(manifestBlock)).tryGet() - - let - fetched = (await node.fetchManifest(manifestBlock.cid)).tryGet() - - check: - fetched.cid == manifest.cid - fetched.blocks == manifest.blocks - - test "Block Batching": - let - manifest 
= await Manifest.fetch(chunker) - - for batchSize in 1..12: - (await node.fetchBatched( - manifest, - batchSize = batchSize, - proc(blocks: seq[bt.Block]) {.gcsafe, async.} = - check blocks.len > 0 and blocks.len <= batchSize - )).tryGet() - - test "Store and retrieve Data Stream": - let - stream = BufferStream.new() - storeFut = node.store(stream) - oddChunkSize = math.trunc(BlockSize/3.14).int # Let's check that node.store can correctly rechunk these odd chunks - oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) # TODO: doesn't work with pad=tue - var - original: seq[byte] - - try: - while ( - let chunk = await oddChunker.getBytes(); - chunk.len > 0): - original &= chunk - await stream.pushData(chunk) - finally: - await stream.pushEof() - await stream.close() - - let - manifestCid = (await storeFut).tryGet() - check: - (await localStore.hasBlock(manifestCid)).tryGet() - - let - manifestBlock = (await localStore.getBlock(manifestCid)).tryGet() - localManifest = Manifest.decode(manifestBlock).tryGet() - - let - data = await retrieve(manifestCid) - check: - data.len == localManifest.originalBytes - data.len == original.len - sha256.digest(data) == sha256.digest(original) - - test "Retrieve One Block": - let - testString = "Block 1" - blk = bt.Block.new(testString.toBytes).tryGet() - - (await localStore.putBlock(blk)).tryGet() - let stream = (await node.retrieve(blk.cid)).tryGet() - - var data = newSeq[byte](testString.len) - await stream.readExactly(addr data[0], data.len) - check string.fromBytes(data) == testString +{.warning[UnusedImport]: off.} diff --git a/tests/codex/testproving.nim b/tests/codex/testproving.nim deleted file mode 100644 index bf810391..00000000 --- a/tests/codex/testproving.nim +++ /dev/null @@ -1,119 +0,0 @@ -import pkg/asynctest -import pkg/chronos -import pkg/codex/proving -import ./helpers/mockproofs -import ./helpers/mockclock -import ./helpers/eventually -import ./examples - -suite "Proving": - - var 
proving: Proving - var proofs: MockProofs - var clock: MockClock - - setup: - proofs = MockProofs.new() - clock = MockClock.new() - proving = Proving.new(proofs, clock) - await proving.start() - - teardown: - await proving.stop() - - proc advanceToNextPeriod(proofs: MockProofs) {.async.} = - let periodicity = await proofs.periodicity() - clock.advance(periodicity.seconds.truncate(int64)) - - test "maintains a list of contract ids to watch": - let id1, id2 = SlotId.example - check proving.slots.len == 0 - proving.add(id1) - check proving.slots.contains(id1) - proving.add(id2) - check proving.slots.contains(id1) - check proving.slots.contains(id2) - - test "removes duplicate contract ids": - let id = SlotId.example - proving.add(id) - proving.add(id) - check proving.slots.len == 1 - - test "invokes callback when proof is required": - let id = SlotId.example - proving.add(id) - var called: bool - proc onProofRequired(id: SlotId) = - called = true - proving.onProofRequired = onProofRequired - proofs.setProofRequired(id, true) - await proofs.advanceToNextPeriod() - check eventually called - - test "callback receives id of contract for which proof is required": - let id1, id2 = SlotId.example - proving.add(id1) - proving.add(id2) - var callbackIds: seq[SlotId] - proc onProofRequired(id: SlotId) = - callbackIds.add(id) - proving.onProofRequired = onProofRequired - proofs.setProofRequired(id1, true) - await proofs.advanceToNextPeriod() - check eventually callbackIds == @[id1] - proofs.setProofRequired(id1, false) - proofs.setProofRequired(id2, true) - await proofs.advanceToNextPeriod() - check eventually callbackIds == @[id1, id2] - - test "invokes callback when proof is about to be required": - let id = SlotId.example - proving.add(id) - var called: bool - proc onProofRequired(id: SlotId) = - called = true - proving.onProofRequired = onProofRequired - proofs.setProofRequired(id, false) - proofs.setProofToBeRequired(id, true) - await proofs.advanceToNextPeriod() - check 
eventually called - - test "stops watching when contract has ended": - let id = SlotId.example - proving.add(id) - proofs.setProofEnd(id, clock.now().u256) - await proofs.advanceToNextPeriod() - var called: bool - proc onProofRequired(id: SlotId) = - called = true - proving.onProofRequired = onProofRequired - proofs.setProofRequired(id, true) - await proofs.advanceToNextPeriod() - check eventually (not proving.slots.contains(id)) - check not called - - test "submits proofs": - let id = SlotId.example - let proof = exampleProof() - await proving.submitProof(id, proof) - - test "supports proof submission subscriptions": - let id = SlotId.example - let proof = exampleProof() - - var receivedIds: seq[SlotId] - var receivedProofs: seq[seq[byte]] - - proc onProofSubmission(id: SlotId, proof: seq[byte]) = - receivedIds.add(id) - receivedProofs.add(proof) - - let subscription = await proving.subscribeProofSubmission(onProofSubmission) - - await proving.submitProof(id, proof) - - check receivedIds == @[id] - check receivedProofs == @[proof] - - await subscription.unsubscribe() diff --git a/tests/codex/testpurchasing.nim b/tests/codex/testpurchasing.nim index 0c525ccf..25504732 100644 --- a/tests/codex/testpurchasing.nim +++ b/tests/codex/testpurchasing.nim @@ -1,21 +1,25 @@ import std/times -import pkg/asynctest import pkg/chronos -import pkg/upraises import pkg/stint import pkg/codex/purchasing -import pkg/codex/purchasing/states/[finished, failed, error, started, submitted, unknown] +import pkg/codex/purchasing/states/finished +import pkg/codex/purchasing/states/started +import pkg/codex/purchasing/states/submitted +import pkg/codex/purchasing/states/unknown +import pkg/codex/purchasing/states/cancelled +import pkg/codex/purchasing/states/failed + +import ../asynctest import ./helpers/mockmarket import ./helpers/mockclock -import ./helpers/eventually import ./examples +import ./helpers -suite "Purchasing": - +asyncchecksuite "Purchasing": var purchasing: Purchasing var 
market: MockMarket var clock: MockClock - var request: StorageRequest + var request, populatedRequest: StorageRequest setup: market = MockMarket.new() @@ -30,13 +34,19 @@ suite "Purchasing": ) ) + # We need request which has stable ID during the whole Purchasing pipeline + # for some tests (related to expiry). Because of Purchasing.populate() we need + # to do the steps bellow. + populatedRequest = StorageRequest.example + populatedRequest.client = await market.getSigner() + test "submits a storage request when asked": discard await purchasing.purchase(request) - let submitted = market.requested[0] - check submitted.ask.slots == request.ask.slots - check submitted.ask.slotSize == request.ask.slotSize - check submitted.ask.duration == request.ask.duration - check submitted.ask.reward == request.ask.reward + check eventually market.requested.len > 0 + check market.requested[0].ask.slots == request.ask.slots + check market.requested[0].ask.slotSize == request.ask.slotSize + check market.requested[0].ask.duration == request.ask.duration + check market.requested[0].ask.reward == request.ask.reward test "remembers purchases": let purchase1 = await purchasing.purchase(request) @@ -50,63 +60,63 @@ suite "Purchasing": test "can change default value for proof probability": purchasing.proofProbability = 42.u256 discard await purchasing.purchase(request) + check eventually market.requested.len > 0 check market.requested[0].ask.proofProbability == 42.u256 test "can override proof probability per request": request.ask.proofProbability = 42.u256 discard await purchasing.purchase(request) + check eventually market.requested.len > 0 check market.requested[0].ask.proofProbability == 42.u256 - test "has a default value for request expiration interval": - check purchasing.requestExpiryInterval != 0.u256 - - test "can change default value for request expiration interval": - purchasing.requestExpiryInterval = 42.u256 - let start = getTime().toUnix() - discard await 
purchasing.purchase(request) - check market.requested[0].expiry == (start + 42).u256 - - test "can override expiry time per request": - let expiry = (getTime().toUnix() + 42).u256 - request.expiry = expiry - discard await purchasing.purchase(request) - check market.requested[0].expiry == expiry - test "includes a random nonce in every storage request": discard await purchasing.purchase(request) discard await purchasing.purchase(request) + check eventually market.requested.len > 0 check market.requested[0].nonce != market.requested[1].nonce test "sets client address in request": discard await purchasing.purchase(request) + check eventually market.requested.len > 0 check market.requested[0].client == await market.getSigner() test "succeeds when request is finished": - let purchase = await purchasing.purchase(request) + market.requestExpiry[populatedRequest.id] = getTime().toUnix() + 10 + let purchase = await purchasing.purchase(populatedRequest) + + check eventually market.requested.len > 0 let request = market.requested[0] let requestEnd = getTime().toUnix() + 42 market.requestEnds[request.id] = requestEnd + market.emitRequestFulfilled(request.id) - clock.set(requestEnd) + clock.set(requestEnd + 1) await purchase.wait() check purchase.error.isNone test "fails when request times out": - let purchase = await purchasing.purchase(request) + let expiry = getTime().toUnix() + 10 + market.requestExpiry[populatedRequest.id] = expiry + let purchase = await purchasing.purchase(populatedRequest) + check eventually market.requested.len > 0 let request = market.requested[0] - clock.set(request.expiry.truncate(int64)) + + clock.set(expiry + 1) expect PurchaseTimeout: await purchase.wait() test "checks that funds were withdrawn when purchase times out": - let purchase = await purchasing.purchase(request) + let expiry = getTime().toUnix() + 10 + market.requestExpiry[populatedRequest.id] = expiry + let purchase = await purchasing.purchase(populatedRequest) + check eventually 
market.requested.len > 0 let request = market.requested[0] - clock.set(request.expiry.truncate(int64)) + clock.set(expiry + 1) expect PurchaseTimeout: await purchase.wait() check market.withdrawn == @[request.id] -suite "Purchasing state machine": +checksuite "Purchasing state machine": var purchasing: Purchasing var market: MockMarket @@ -141,100 +151,82 @@ suite "Purchasing state machine": let request1, request2, request3, request4, request5 = StorageRequest.example market.requested = @[request1, request2, request3, request4, request5] market.activeRequests[me] = @[request1.id, request2.id, request3.id, request4.id, request5.id] - market.state[request1.id] = RequestState.New - market.state[request2.id] = RequestState.Started - market.state[request3.id] = RequestState.Cancelled - market.state[request4.id] = RequestState.Finished - market.state[request5.id] = RequestState.Failed + market.requestState[request1.id] = RequestState.New + market.requestState[request2.id] = RequestState.Started + market.requestState[request3.id] = RequestState.Cancelled + market.requestState[request4.id] = RequestState.Finished + market.requestState[request5.id] = RequestState.Failed # ensure the started state doesn't error, giving a false positive test result market.requestEnds[request2.id] = clock.now() - 1 await purchasing.load() - check purchasing.getPurchase(PurchaseId(request1.id)).?finished == false.some - check purchasing.getPurchase(PurchaseId(request2.id)).?finished == true.some - check purchasing.getPurchase(PurchaseId(request3.id)).?finished == true.some - check purchasing.getPurchase(PurchaseId(request4.id)).?finished == true.some - check purchasing.getPurchase(PurchaseId(request5.id)).?finished == true.some - check purchasing.getPurchase(PurchaseId(request5.id)).?error.isSome + check eventually purchasing.getPurchase(PurchaseId(request1.id)).?finished == false.some + check eventually purchasing.getPurchase(PurchaseId(request2.id)).?finished == true.some + check eventually 
purchasing.getPurchase(PurchaseId(request3.id)).?finished == true.some + check eventually purchasing.getPurchase(PurchaseId(request4.id)).?finished == true.some + check eventually purchasing.getPurchase(PurchaseId(request5.id)).?finished == true.some + check eventually purchasing.getPurchase(PurchaseId(request5.id)).?error.isSome test "moves to PurchaseSubmitted when request state is New": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) market.requested = @[request] - market.state[request.id] = RequestState.New - purchase.switch(PurchaseUnknown()) - check (purchase.state as PurchaseSubmitted).isSome + market.requestState[request.id] = RequestState.New + let next = await PurchaseUnknown().run(purchase) + check !next of PurchaseSubmitted test "moves to PurchaseStarted when request state is Started": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) market.requested = @[request] - market.state[request.id] = RequestState.Started - purchase.switch(PurchaseUnknown()) - check (purchase.state as PurchaseStarted).isSome + market.requestState[request.id] = RequestState.Started + let next = await PurchaseUnknown().run(purchase) + check !next of PurchaseStarted - test "moves to PurchaseErrored when request state is Cancelled": + test "moves to PurchaseCancelled when request state is Cancelled": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) market.requested = @[request] - market.state[request.id] = RequestState.Cancelled - purchase.switch(PurchaseUnknown()) - check (purchase.state as PurchaseErrored).isSome - check purchase.error.?msg == "Purchase cancelled due to timeout".some + market.requestState[request.id] = RequestState.Cancelled + let next = await PurchaseUnknown().run(purchase) + check !next of PurchaseCancelled test "moves to PurchaseFinished when request state is 
Finished": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) market.requested = @[request] - market.state[request.id] = RequestState.Finished - purchase.switch(PurchaseUnknown()) - check (purchase.state as PurchaseFinished).isSome + market.requestState[request.id] = RequestState.Finished + let next = await PurchaseUnknown().run(purchase) + check !next of PurchaseFinished - test "moves to PurchaseErrored when request state is Failed": + test "moves to PurchaseFailed when request state is Failed": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) market.requested = @[request] - market.state[request.id] = RequestState.Failed - purchase.switch(PurchaseUnknown()) - check (purchase.state as PurchaseErrored).isSome - check purchase.error.?msg == "Purchase failed".some + market.requestState[request.id] = RequestState.Failed + let next = await PurchaseUnknown().run(purchase) + check !next of PurchaseFailed - test "moves to PurchaseErrored state once RequestFailed emitted": - let me = await market.getSigner() + test "moves to PurchaseFailed state once RequestFailed emitted": let request = StorageRequest.example - market.requested = @[request] - market.activeRequests[me] = @[request.id] - market.state[request.id] = RequestState.Started + let purchase = Purchase.new(request, market, clock) market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) - await purchasing.load() + let future = PurchaseStarted().run(purchase) - # emit mock contract failure event market.emitRequestFailed(request.id) - # must allow time for the callback to trigger the completion of the future - await sleepAsync(chronos.milliseconds(10)) - # now check the result - let purchase = purchasing.getPurchase(PurchaseId(request.id)) - let state = purchase.?state - check (state as PurchaseErrored).isSome - check (!purchase).error.?msg == "Purchase failed".some + let next = await future + check !next of 
PurchaseFailed test "moves to PurchaseFinished state once request finishes": - let me = await market.getSigner() let request = StorageRequest.example - market.requested = @[request] - market.activeRequests[me] = @[request.id] - market.state[request.id] = RequestState.Started + let purchase = Purchase.new(request, market, clock) market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) - await purchasing.load() + let future = PurchaseStarted().run(purchase) - # advance the clock to the end of the request - clock.advance(request.ask.duration.truncate(int64)) + clock.advance(request.ask.duration.truncate(int64) + 1) - # now check the result - proc getState: ?PurchaseState = - purchasing.getPurchase(PurchaseId(request.id)).?state as PurchaseState - - check eventually (getState() as PurchaseFinished).isSome + let next = await future + check !next of PurchaseFinished diff --git a/tests/codex/testsales.nim b/tests/codex/testsales.nim index 073e07c2..38c530f6 100644 --- a/tests/codex/testsales.nim +++ b/tests/codex/testsales.nim @@ -1,212 +1,7 @@ -import std/sets -import pkg/asynctest -import pkg/chronos -import pkg/codex/contracts/requests -import pkg/codex/proving -import pkg/codex/sales -import ./helpers/mockmarket -import ./helpers/mockclock -import ./helpers/eventually -import ./examples +import ./sales/testsales +import ./sales/teststates +import ./sales/testreservations +import ./sales/testslotqueue +import ./sales/testsalesagent -suite "Sales": - - let availability = Availability.init( - size=100.u256, - duration=60.u256, - minPrice=600.u256 - ) - var request = StorageRequest( - ask: StorageAsk( - slots: 4, - slotSize: 100.u256, - duration: 60.u256, - reward: 10.u256, - ), - content: StorageContent( - cid: "some cid" - ) - ) - let proof = exampleProof() - - var sales: Sales - var market: MockMarket - var clock: MockClock - var proving: Proving - - setup: - market = MockMarket.new() - clock = MockClock.new() - proving = Proving.new() - 
sales = Sales.new(market, clock, proving) - sales.onStore = proc(request: StorageRequest, - slot: UInt256, - availability: Availability) {.async.} = - discard - sales.onProve = proc(request: StorageRequest, - slot: UInt256): Future[seq[byte]] {.async.} = - return proof - await sales.start() - request.expiry = (clock.now() + 42).u256 - - teardown: - await sales.stop() - - test "has no availability initially": - check sales.available.len == 0 - - test "can add available storage": - let availability1 = Availability.example - let availability2 = Availability.example - sales.add(availability1) - check sales.available.contains(availability1) - sales.add(availability2) - check sales.available.contains(availability1) - check sales.available.contains(availability2) - - test "can remove available storage": - sales.add(availability) - sales.remove(availability) - check sales.available.len == 0 - - test "generates unique ids for storage availability": - let availability1 = Availability.init(1.u256, 2.u256, 3.u256) - let availability2 = Availability.init(1.u256, 2.u256, 3.u256) - check availability1.id != availability2.id - - test "makes storage unavailable when matching request comes in": - sales.add(availability) - await market.requestStorage(request) - check sales.available.len == 0 - - test "ignores request when no matching storage is available": - sales.add(availability) - var tooBig = request - tooBig.ask.slotSize = request.ask.slotSize + 1 - await market.requestStorage(tooBig) - check sales.available == @[availability] - - test "ignores request when reward is too low": - sales.add(availability) - var tooCheap = request - tooCheap.ask.reward = request.ask.reward - 1 - await market.requestStorage(tooCheap) - check sales.available == @[availability] - - test "retrieves and stores data locally": - var storingRequest: StorageRequest - var storingSlot: UInt256 - var storingAvailability: Availability - sales.onStore = proc(request: StorageRequest, - slot: UInt256, - 
availability: Availability) {.async.} = - storingRequest = request - storingSlot = slot - storingAvailability = availability - sales.add(availability) - await market.requestStorage(request) - check storingRequest == request - check storingSlot < request.ask.slots.u256 - check storingAvailability == availability - - test "makes storage available again when data retrieval fails": - let error = newException(IOError, "data retrieval failed") - sales.onStore = proc(request: StorageRequest, - slot: UInt256, - availability: Availability) {.async.} = - raise error - sales.add(availability) - await market.requestStorage(request) - check sales.available == @[availability] - - test "generates proof of storage": - var provingRequest: StorageRequest - var provingSlot: UInt256 - sales.onProve = proc(request: StorageRequest, - slot: UInt256): Future[seq[byte]] {.async.} = - provingRequest = request - provingSlot = slot - sales.add(availability) - await market.requestStorage(request) - check provingRequest == request - check provingSlot < request.ask.slots.u256 - - test "fills a slot": - sales.add(availability) - await market.requestStorage(request) - check market.filled.len == 1 - check market.filled[0].requestId == request.id - check market.filled[0].slotIndex < request.ask.slots.u256 - check market.filled[0].proof == proof - check market.filled[0].host == await market.getSigner() - - test "calls onSale when slot is filled": - var soldAvailability: Availability - var soldRequest: StorageRequest - var soldSlotIndex: UInt256 - sales.onSale = proc(availability: Availability, - request: StorageRequest, - slotIndex: UInt256) = - soldAvailability = availability - soldRequest = request - soldSlotIndex = slotIndex - sales.add(availability) - await market.requestStorage(request) - check soldAvailability == availability - check soldRequest == request - check soldSlotIndex < request.ask.slots.u256 - - test "calls onClear when storage becomes available again": - # fail the proof 
intentionally to trigger `agent.finish(success=false)`, - # which then calls the onClear callback - sales.onProve = proc(request: StorageRequest, - slot: UInt256): Future[seq[byte]] {.async.} = - raise newException(IOError, "proof failed") - var clearedAvailability: Availability - var clearedRequest: StorageRequest - var clearedSlotIndex: UInt256 - sales.onClear = proc(availability: Availability, - request: StorageRequest, - slotIndex: UInt256) = - clearedAvailability = availability - clearedRequest = request - clearedSlotIndex = slotIndex - sales.add(availability) - await market.requestStorage(request) - check clearedAvailability == availability - check clearedRequest == request - check clearedSlotIndex < request.ask.slots.u256 - - test "makes storage available again when other host fills the slot": - let otherHost = Address.example - sales.onStore = proc(request: StorageRequest, - slot: UInt256, - availability: Availability) {.async.} = - await sleepAsync(1.hours) - sales.add(availability) - await market.requestStorage(request) - for slotIndex in 0.. 
blockSize": @@ -87,7 +86,7 @@ suite "StoreStream": else: check read == 1 - check sequential_bytes(buf,read,n) + check sequentialBytes(buf,read,n) n += read test "Read exact bytes within block boundary": @@ -95,11 +94,48 @@ suite "StoreStream": buf = newSeq[byte](5) await stream.readExactly(addr buf[0], 5) - check sequential_bytes(buf,5,0) + check sequentialBytes(buf,5,0) test "Read exact bytes outside of block boundary": var buf = newSeq[byte](15) await stream.readExactly(addr buf[0], 15) - check sequential_bytes(buf,15,0) + check sequentialBytes(buf,15,0) + +suite "StoreStream - Size Tests": + + var stream: StoreStream + + teardown: + await stream.close() + + test "Should return dataset size as stream size": + let manifest = Manifest.new( + treeCid = Cid.example, + datasetSize = 80.NBytes, + blockSize = 10.NBytes + ) + + stream = StoreStream.new(CacheStore.new(), manifest) + + check stream.size == 80 + + test "Should not count parity/padding bytes as part of stream size": + let protectedManifest = Manifest.new( + treeCid = Cid.example, + datasetSize = 120.NBytes, # size including parity bytes + blockSize = 10.NBytes, + version = CIDv1, + hcodec = Sha256HashCodec, + codec = BlockCodec, + ecK = 2, + ecM = 1, + originalTreeCid = Cid.example, + originalDatasetSize = 80.NBytes, # size without parity bytes + strategy = StrategyType.SteppedStrategy + ) + + stream = StoreStream.new(CacheStore.new(), protectedManifest) + + check stream.size == 80 diff --git a/tests/codex/testsystemclock.nim b/tests/codex/testsystemclock.nim new file mode 100644 index 00000000..6f743283 --- /dev/null +++ b/tests/codex/testsystemclock.nim @@ -0,0 +1,14 @@ +import std/times +import std/unittest + +import codex/systemclock +import ./helpers + +checksuite "SystemClock": + test "Should get now": + let clock = SystemClock.new() + + let expectedNow = times.now().utc + let now = clock.now() + + check now == expectedNow.toTime().toUnix() diff --git a/tests/codex/testutils.nim 
b/tests/codex/testutils.nim index 3b3b7c24..1a4a9469 100644 --- a/tests/codex/testutils.nim +++ b/tests/codex/testutils.nim @@ -1,5 +1,9 @@ -import ./utils/teststatemachine -import ./utils/testoptionalcast +import ./utils/testoptions import ./utils/testkeyutils +import ./utils/testasyncstatemachine +import ./utils/testasynciter +import ./utils/testtimer +import ./utils/testthen +import ./utils/testtrackedfutures {.warning[UnusedImport]: off.} diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim new file mode 100644 index 00000000..b84c56c3 --- /dev/null +++ b/tests/codex/testvalidation.nim @@ -0,0 +1,73 @@ +import pkg/chronos + +import codex/validation +import codex/periods + +import ../asynctest +import ./helpers/mockmarket +import ./helpers/mockclock +import ./examples +import ./helpers + +asyncchecksuite "validation": + let period = 10 + let timeout = 5 + let maxSlots = 100 + let slot = Slot.example + let proof = Groth16Proof.example + let collateral = slot.request.ask.collateral + + var validation: Validation + var market: MockMarket + var clock: MockClock + + setup: + market = MockMarket.new() + clock = MockClock.new() + validation = Validation.new(clock, market, maxSlots) + market.config.proofs.period = period.u256 + market.config.proofs.timeout = timeout.u256 + await validation.start() + + teardown: + await validation.stop() + + proc advanceToNextPeriod = + let periodicity = Periodicity(seconds: period.u256) + let period = periodicity.periodOf(clock.now().u256) + let periodEnd = periodicity.periodEnd(period) + clock.set((periodEnd + 1).truncate(int)) + + test "the list of slots that it's monitoring is empty initially": + check validation.slots.len == 0 + + test "when a slot is filled on chain, it is added to the list": + await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) + check validation.slots == @[slot.id] + + for state in [SlotState.Finished, SlotState.Failed]: + test "when slot state changes, it is removed 
from the list": + await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) + market.slotState[slot.id] = state + advanceToNextPeriod() + check eventually validation.slots.len == 0 + + test "when a proof is missed, it is marked as missing": + await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) + market.setCanProofBeMarkedAsMissing(slot.id, true) + advanceToNextPeriod() + await sleepAsync(1.millis) + check market.markedAsMissingProofs.contains(slot.id) + + test "when a proof can not be marked as missing, it will not be marked": + await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) + market.setCanProofBeMarkedAsMissing(slot.id, false) + advanceToNextPeriod() + await sleepAsync(1.millis) + check market.markedAsMissingProofs.len == 0 + + test "it does not monitor more than the maximum number of slots": + for _ in 0.. $i) + + check: + iter.toSeq() == @["0", "1", "2", "3", "4"] + + test "Should leave only odd items using `filter`": + let iter = Iter.new(0..<5) + .filter((i: int) => (i mod 2) == 1) + + check: + iter.toSeq() == @[1, 3] + + test "Should leave only odd items using `mapFilter`": + let + iter1 = Iter.new(0..<5) + iter2 = mapFilter[int, string](iter1, + proc(i: int): ?string = + if (i mod 2) == 1: + some($i) + else: + string.none + ) + + check: + iter2.toSeq() == @["1", "3"] + + test "Should yield all items before err using `map`": + let + iter = Iter.new(0..<5) + .map( + proc (i: int): string = + if i < 3: + return $i + else: + raise newException(CatchableError, "Some error") + ) + + var collected: seq[string] + + expect CatchableError: + for i in iter: + collected.add(i) + + check: + collected == @["0", "1", "2"] + iter.finished + + test "Should yield all items before err using `filter`": + let + iter = Iter.new(0..<5) + .filter( + proc (i: int): bool = + if i < 3: + return true + else: + raise newException(CatchableError, "Some error") + ) + + var collected: seq[int] + + expect CatchableError: + for 
i in iter: + collected.add(i) + + check: + collected == @[0, 1, 2] + iter.finished + + test "Should yield all items before err using `mapFilter`": + let + iter1 = Iter.new(0..<5) + iter2 = mapFilter[int, string](iter1, + proc (i: int): ?string = + if i < 3: + return some($i) + else: + raise newException(CatchableError, "Some error") + ) + + var collected: seq[string] + + expect CatchableError: + for i in iter2: + collected.add(i) + + check: + collected == @["0", "1", "2"] + iter2.finished diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index 24b5f0ca..2a33818a 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -1,14 +1,13 @@ import std/unittest import std/os -import pkg/libp2p -import pkg/questionable/results +import pkg/questionable import codex/utils/keyutils +import ../helpers when defined(windows): import stew/windows/acl -suite "keyutils": - +checksuite "keyutils": let path = getTempDir() / "CodexTest" setup: @@ -18,17 +17,17 @@ suite "keyutils": os.removeDir(path) test "creates a key file when it does not exist yet": - check setupKey(path / "keyfile").isSuccess + check setupKey(path / "keyfile").isOk check fileExists(path / "keyfile") test "stores key in a file that's only readable by the user": - discard !setupKey(path / "keyfile") + discard setupKey(path / "keyfile").get() when defined(posix): check getFilePermissions(path / "keyfile") == {fpUserRead, fpUserWrite} when defined(windows): check checkCurrentUserOnlyACL(path / "keyfile").get() test "reads key file when it does exist": - let key = !setupKey(path / "keyfile") - check !setupKey(path / "keyfile") == key + let key = setupKey(path / "keyfile").get() + check setupKey(path / "keyfile").get() == key diff --git a/tests/codex/utils/testoptionalcast.nim b/tests/codex/utils/testoptions.nim similarity index 58% rename from tests/codex/utils/testoptionalcast.nim rename to tests/codex/utils/testoptions.nim index e945df40..eb566ad7 
100644 --- a/tests/codex/utils/testoptionalcast.nim +++ b/tests/codex/utils/testoptions.nim @@ -1,8 +1,8 @@ import std/unittest -import codex/utils/optionalcast - -suite "optional casts": +import codex/utils/options +import ../helpers +checksuite "optional casts": test "casting value to same type works": check 42 as int == some 42 @@ -28,3 +28,22 @@ suite "optional casts": check 42.some as int == some 42 check 42.some as string == string.none check int.none as int == int.none + +checksuite "Optionalize": + test "does not except non-object types": + static: + doAssert not compiles(Optionalize(int)) + + test "converts object fields to option": + type BaseType = object + a: int + b: bool + c: string + d: Option[string] + + type OptionalizedType = Optionalize(BaseType) + + check OptionalizedType.a is Option[int] + check OptionalizedType.b is Option[bool] + check OptionalizedType.c is Option[string] + check OptionalizedType.d is Option[string] diff --git a/tests/codex/utils/teststatemachine.nim b/tests/codex/utils/teststatemachine.nim deleted file mode 100644 index 84a573c3..00000000 --- a/tests/codex/utils/teststatemachine.nim +++ /dev/null @@ -1,48 +0,0 @@ -import std/unittest -import pkg/questionable -import codex/utils/statemachine - -type - Light = ref object of StateMachine - On = ref object of State - Off = ref object of State - -var enteredOn: bool -var exitedOn: bool - -method enter(state: On) = - enteredOn = true - -method exit(state: On) = - exitedOn = true - -suite "state machines": - - setup: - enteredOn = false - exitedOn = false - - test "calls `enter` when entering state": - Light().switch(On()) - check enteredOn - - test "calls `exit` when exiting state": - let light = Light() - light.switch(On()) - check not exitedOn - light.switch(Off()) - check exitedOn - - test "allows access to state machine from state": - let light = Light() - let on = On() - check not isSome on.context - light.switch(on) - check on.context == some StateMachine(light) - - test 
"removes access to state machine when state exited": - let light = Light() - let on = On() - light.switch(on) - light.switch(Off()) - check not isSome on.context diff --git a/tests/codex/utils/testthen.nim b/tests/codex/utils/testthen.nim new file mode 100644 index 00000000..a66e8cd2 --- /dev/null +++ b/tests/codex/utils/testthen.nim @@ -0,0 +1,414 @@ +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import codex/utils/then + +import ../../asynctest +import ../helpers + +proc newError(): ref CatchableError = + (ref CatchableError)(msg: "some error") + +asyncchecksuite "then - Future[void]": + var error = newError() + var future: Future[void] + + setup: + future = newFuture[void]("test void") + + teardown: + if not future.finished: + raiseAssert "test should finish future" + + test "then callback is fired when future is already finished": + var firedImmediately = false + future.complete() + discard future.then(proc() = firedImmediately = true) + check eventually firedImmediately + + test "then callback is fired after future is finished": + var fired = false + discard future.then(proc() = fired = true) + future.complete() + check eventually fired + + test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error + + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error + + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired + + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check 
eventually fired + + test "does not fire other callbacks when successful": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete() + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.fail(error) + + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) + + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": + var onSuccessCalledTimes = 0 + discard future + .then(proc() = inc onSuccessCalledTimes) + .then(proc() = inc onSuccessCalledTimes) + .then(proc() = inc onSuccessCalledTimes) + future.complete() + check eventually onSuccessCalledTimes == 3 + +asyncchecksuite "then - Future[T]": + var error = newError() + var future: Future[int] + + setup: + future = newFuture[int]("test void") + + teardown: + if not future.finished: + raiseAssert "test should finish future" + + test "then callback is fired when future is already finished": + var cbVal = 0 + future.complete(1) + discard future.then(proc(val: int) = cbVal = val) + check 
eventually cbVal == 1 + + test "then callback is fired after future is finished": + var cbVal = 0 + discard future.then(proc(val: int) = cbVal = val) + future.complete(1) + check eventually cbVal == 1 + + test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error + + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error + + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired + + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired + + test "does not fire other callbacks when successful": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete(1) + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.fail(error) + + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) + + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled 
= false + var onCatchCalled = false + + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": + var onSuccessCalledTimes = 0 + discard future + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + future.complete(1) + check eventually onSuccessCalledTimes == 3 + +asyncchecksuite "then - Future[?!void]": + var error = newError() + var future: Future[?!void] + + setup: + future = newFuture[?!void]("test void") + + teardown: + if not future.finished: + raiseAssert "test should finish future" + + test "then callback is fired when future is already finished": + var firedImmediately = false + future.complete(success()) + discard future.then(proc() = firedImmediately = true) + check eventually firedImmediately + + test "then callback is fired after future is finished": + var fired = false + discard future.then(proc() = fired = true) + future.complete(success()) + check eventually fired + + test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error + + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error + + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired + + test "cancelled callback is fired after future is cancelled": + var fired = false + 
discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired + + test "does not fire other callbacks when successful": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete(success()) + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.fail(error) + + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) + + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": + var onSuccessCalledTimes = 0 + discard future + .then(proc() = inc onSuccessCalledTimes) + .then(proc() = inc onSuccessCalledTimes) + .then(proc() = inc onSuccessCalledTimes) + future.complete(success()) + check eventually onSuccessCalledTimes == 3 + +asyncchecksuite "then - Future[?!T]": + var error = newError() + var future: Future[?!int] + + setup: + future = newFuture[?!int]("test void") + + teardown: + if not future.finished: + raiseAssert "test should finish future" + + test "then callback is fired when future is already 
finished": + var cbVal = 0 + future.complete(success(1)) + discard future.then(proc(val: int) = cbVal = val) + check eventually cbVal == 1 + + test "then callback is fired after future is finished": + var cbVal = 0 + discard future.then(proc(val: int) = cbVal = val) + future.complete(success(1)) + check eventually cbVal == 1 + + test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error + + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error + + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired + + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired + + test "does not fire other callbacks when successful": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete(success(1)) + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.fail(error) + + check eventually onCatchCalled + check always (not onCancelledCalled 
and not onSuccessCalled) + + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": + var onSuccessCalledTimes = 0 + discard future + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + future.complete(success(1)) + check eventually onSuccessCalledTimes == 3 diff --git a/tests/codex/utils/testtimer.nim b/tests/codex/utils/testtimer.nim new file mode 100644 index 00000000..303c43fb --- /dev/null +++ b/tests/codex/utils/testtimer.nim @@ -0,0 +1,84 @@ +## Nim-Codex +## Copyright (c) 2023 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +import pkg/chronos + +import codex/utils/timer + +import ../../asynctest +import ../helpers + +asyncchecksuite "Timer": + var timer1: Timer + var timer2: Timer + var output: string + var numbersState = 0 + var lettersState = 'a' + + proc numbersCallback(): Future[void] {.async.} = + output &= $numbersState + inc numbersState + + proc lettersCallback(): Future[void] {.async.} = + output &= $lettersState + inc lettersState + + proc exceptionCallback(): Future[void] {.async.} = + raise newException(CatchableError, "Test Exception") + + proc startNumbersTimer() = + timer1.start(numbersCallback, 10.milliseconds) + + proc startLettersTimer() = + timer2.start(lettersCallback, 10.milliseconds) + + setup: + timer1 = Timer.new() + timer2 = Timer.new() + + output = "" + numbersState = 0 + lettersState = 'a' + + teardown: + await timer1.stop() + await timer2.stop() + + test "Start timer1 should execute callback": + startNumbersTimer() + check eventually output == "0" + + test "Start timer1 should execute callback multiple times": + startNumbersTimer() + check eventually output == "012" + + test "Starting timer1 multiple times has no impact": + startNumbersTimer() + startNumbersTimer() + startNumbersTimer() + check eventually output == "01234" + + test "Stop timer1 should stop execution of the callback": + startNumbersTimer() + check eventually output == "012" + await timer1.stop() + await sleepAsync(30.milliseconds) + let stoppedOutput = output + await sleepAsync(30.milliseconds) + check output == stoppedOutput + + test "Exceptions raised in timer callback are handled": + timer1.start(exceptionCallback, 10.milliseconds) + await sleepAsync(30.milliseconds) + await timer1.stop() + + test "Starting both timers should execute callbacks sequentially": + startNumbersTimer() + startLettersTimer() + check eventually output == "0a1b2c3d4e" diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim new file mode 100644 index 00000000..9274f84f 
--- /dev/null +++ b/tests/codex/utils/testtrackedfutures.nim @@ -0,0 +1,67 @@ +import pkg/chronos +import codex/utils/trackedfutures + +import ../../asynctest +import ../helpers + +type Module = object + trackedFutures: TrackedFutures + +asyncchecksuite "tracked futures": + var module: Module + + setup: + module = Module(trackedFutures: TrackedFutures.new()) + + test "starts with zero tracked futures": + check module.trackedFutures.len == 0 + + test "tracks unfinished futures": + let fut = newFuture[void]("test") + discard fut.track(module) + check module.trackedFutures.len == 1 + + test "does not track completed futures": + let fut = newFuture[void]("test") + fut.complete() + discard fut.track(module) + check eventually module.trackedFutures.len == 0 + + test "does not track failed futures": + let fut = newFuture[void]("test") + fut.fail((ref CatchableError)(msg: "some error")) + discard fut.track(module) + check eventually module.trackedFutures.len == 0 + + test "does not track cancelled futures": + let fut = newFuture[void]("test") + await fut.cancelAndWait() + discard fut.track(module) + check eventually module.trackedFutures.len == 0 + + test "removes tracked future when finished": + let fut = newFuture[void]("test") + discard fut.track(module) + fut.complete() + check eventually module.trackedFutures.len == 0 + + test "removes tracked future when cancelled": + let fut = newFuture[void]("test") + discard fut.track(module) + await fut.cancelAndWait() + check eventually module.trackedFutures.len == 0 + + test "cancels and removes all tracked futures": + let fut1 = newFuture[void]("test1") + let fut2 = newFuture[void]("test2") + let fut3 = newFuture[void]("test3") + discard fut1.track(module) + discard fut2.track(module) + discard fut3.track(module) + await module.trackedFutures.cancelTracked() + check eventually fut1.cancelled + check eventually fut2.cancelled + check eventually fut3.cancelled + check eventually module.trackedFutures.len == 0 + + diff --git 
a/tests/codex/utils/testutils.nim b/tests/codex/utils/testutils.nim new file mode 100644 index 00000000..b8e386d0 --- /dev/null +++ b/tests/codex/utils/testutils.nim @@ -0,0 +1,36 @@ +import std/unittest + +import pkg/codex/utils + +suite "findIt": + + setup: + type AnObject = object + attribute1*: int + + var objList = @[ + AnObject(attribute1: 1), + AnObject(attribute1: 3), + AnObject(attribute1: 5), + AnObject(attribute1: 3), + ] + + test "should retur index of first object matching predicate": + assert objList.findIt(it.attribute1 == 3) == 1 + + test "should return -1 when no object matches predicate": + assert objList.findIt(it.attribute1 == 15) == -1 + +suite "parseDuration": + + test "should parse durations": + var res: Duration # caller must still know if 'b' refers to bytes|bits + check parseDuration("10Hr", res) == 3 + check res == hours(10) + check parseDuration("64min", res) == 3 + check res == minutes(64) + check parseDuration("7m/block", res) == 2 # '/' stops parse + check res == minutes(7) # 1 shl 30, forced binary metric + check parseDuration("3d", res) == 2 # '/' stops parse + check res == days(3) # 1 shl 30, forced binary metric + diff --git a/tests/config.nims b/tests/config.nims new file mode 100644 index 00000000..55858427 --- /dev/null +++ b/tests/config.nims @@ -0,0 +1,8 @@ +--path:".." 
+--threads:on +--tlsEmulation:off + +when not defined(chronicles_log_level): + --define:"chronicles_log_level:NONE" # compile all log statements + --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime + --"import":"logging" # ensure that logging is ignored at runtime diff --git a/tests/contracts/deployment.nim b/tests/contracts/deployment.nim new file mode 100644 index 00000000..f62bb1be --- /dev/null +++ b/tests/contracts/deployment.nim @@ -0,0 +1,22 @@ +import std/os +import std/options +import pkg/ethers +import pkg/codex/contracts/marketplace + +const hardhatMarketAddress = + Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44").get() +const hardhatMarketWithDummyVerifier = + Address.init("0xa85233C63b9Ee964Add6F2cffe00Fd84eb32338f").get() +const marketAddressEnvName = "CODEX_MARKET_ADDRESS" + +proc address*(_: type Marketplace, dummyVerifier = false): Address = + if existsEnv(marketAddressEnvName): + without address =? Address.init(getEnv(marketAddressEnvName)): + raiseAssert "Invalid env. 
variable marketplace contract address" + + return address + + if dummyVerifier: + hardhatMarketWithDummyVerifier + else: + hardhatMarketAddress diff --git a/tests/contracts/testClock.nim b/tests/contracts/testClock.nim index db97d70e..40f7aed0 100644 --- a/tests/contracts/testClock.nim +++ b/tests/contracts/testClock.nim @@ -1,43 +1,41 @@ import std/times import pkg/chronos import codex/contracts/clock +import codex/utils/json import ../ethertest ethersuite "On-Chain Clock": - var clock: OnChainClock setup: - clock = OnChainClock.new(provider) + clock = OnChainClock.new(ethProvider) await clock.start() teardown: await clock.stop() test "returns the current time of the EVM": - let latestBlock = (!await provider.getBlock(BlockTag.latest)) + let latestBlock = (!await ethProvider.getBlock(BlockTag.latest)) let timestamp = latestBlock.timestamp.truncate(int64) check clock.now() == timestamp test "updates time with timestamp of new blocks": let future = (getTime() + 42.years).toUnix - discard await provider.send("evm_setNextBlockTimestamp", @[%future]) - discard await provider.send("evm_mine") - check clock.now() == future + discard await ethProvider.send("evm_setNextBlockTimestamp", @[%future]) + discard await ethProvider.send("evm_mine") + check eventually clock.now() == future - test "updates time using wall-clock in-between blocks": - let past = clock.now() - await sleepAsync(chronos.seconds(1)) - check clock.now() > past + test "can wait until a certain time is reached by the chain": + let future = clock.now() + 42 # seconds + let waiting = clock.waitUntil(future) + discard await ethProvider.send("evm_setNextBlockTimestamp", @[%future]) + discard await ethProvider.send("evm_mine") + check await waiting.withTimeout(chronos.milliseconds(100)) - test "raises when not started": - expect AssertionError: - discard OnChainClock.new(provider).now() - - test "raises when stopped": - await clock.stop() - expect AssertionError: - discard clock.now() + test "can wait until a 
certain time is reached by the wall-clock": + let future = clock.now() + 1 # seconds + let waiting = clock.waitUntil(future) + check await waiting.withTimeout(chronos.seconds(2)) test "handles starting multiple times": await clock.start() diff --git a/tests/contracts/testCollateral.nim b/tests/contracts/testCollateral.nim deleted file mode 100644 index 6d2079b8..00000000 --- a/tests/contracts/testCollateral.nim +++ /dev/null @@ -1,32 +0,0 @@ -import pkg/chronos -import pkg/stint -import codex/contracts -import codex/contracts/testtoken -import ../ethertest - -ethersuite "Collateral": - - let collateralAmount = 100.u256 - - var storage: Storage - var token: TestToken - - setup: - let deployment = deployment() - storage = Storage.new(!deployment.address(Storage), provider.getSigner()) - token = TestToken.new(!deployment.address(TestToken), provider.getSigner()) - await token.mint(accounts[0], 1000.u256) - - test "increases collateral": - await token.approve(storage.address, collateralAmount) - await storage.deposit(collateralAmount) - let collateral = await storage.balanceOf(accounts[0]) - check collateral == collateralAmount - - test "withdraws collateral": - await token.approve(storage.address, collateralAmount) - await storage.deposit(collateralAmount) - let balanceBefore = await token.balanceOf(accounts[0]) - await storage.withdraw() - let balanceAfter = await token.balanceOf(accounts[0]) - check (balanceAfter - balanceBefore) == collateralAmount diff --git a/tests/contracts/testContracts.nim b/tests/contracts/testContracts.nim index 3063b899..4432160d 100644 --- a/tests/contracts/testContracts.nim +++ b/tests/contracts/testContracts.nim @@ -1,92 +1,95 @@ -import std/json import pkg/chronos import pkg/ethers/testing +import pkg/ethers/erc20 import codex/contracts -import codex/contracts/testtoken -import codex/storageproofs import ../ethertest import ./examples import ./time +import ./deployment -ethersuite "Storage contracts": - let proof = exampleProof() 
+ethersuite "Marketplace contracts": + let proof = Groth16Proof.example var client, host: Signer - var storage: Storage - var token: TestToken - var collateralAmount: UInt256 + var marketplace: Marketplace + var token: Erc20Token var periodicity: Periodicity var request: StorageRequest var slotId: SlotId proc switchAccount(account: Signer) = - storage = storage.connect(account) + marketplace = marketplace.connect(account) token = token.connect(account) setup: - client = provider.getSigner(accounts[0]) - host = provider.getSigner(accounts[1]) + client = ethProvider.getSigner(accounts[0]) + host = ethProvider.getSigner(accounts[1]) - let deployment = deployment() - storage = Storage.new(!deployment.address(Storage), provider.getSigner()) - token = TestToken.new(!deployment.address(TestToken), provider.getSigner()) + let address = Marketplace.address(dummyVerifier = true) + marketplace = Marketplace.new(address, ethProvider.getSigner()) - await token.mint(await client.getAddress(), 1_000_000_000.u256) - await token.mint(await host.getAddress(), 1000_000_000.u256) + let tokenAddress = await marketplace.token() + token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) - collateralAmount = await storage.collateralAmount() - periodicity = Periodicity(seconds: await storage.proofPeriod()) + let config = await marketplace.config() + periodicity = Periodicity(seconds: config.proofs.period) request = StorageRequest.example request.client = await client.getAddress() switchAccount(client) - await token.approve(storage.address, request.price) - await storage.requestStorage(request) + discard await token.approve(marketplace.address, request.price) + discard await marketplace.requestStorage(request) switchAccount(host) - await token.approve(storage.address, collateralAmount) - await storage.deposit(collateralAmount) - await storage.fillSlot(request.id, 0.u256, proof) + discard await token.approve(marketplace.address, request.ask.collateral) + discard await 
marketplace.fillSlot(request.id, 0.u256, proof) slotId = request.slotId(0.u256) proc waitUntilProofRequired(slotId: SlotId) {.async.} = - let currentPeriod = periodicity.periodOf(await provider.currentTime()) - await provider.advanceTimeTo(periodicity.periodEnd(currentPeriod)) + let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) + await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod)) while not ( - (await storage.isProofRequired(slotId)) and - (await storage.getPointer(slotId)) < 250 + (await marketplace.isProofRequired(slotId)) and + (await marketplace.getPointer(slotId)) < 250 ): - await provider.advanceTime(periodicity.seconds) + await ethProvider.advanceTime(periodicity.seconds) proc startContract() {.async.} = for slotIndex in 1..= 3, "must be more than 3 blocks" + let rng = Rng.instance() + let chunker = RandomChunker.new( + rng, size = DefaultBlockSize * blocks.NBytes, chunkSize = DefaultBlockSize) + var data: seq[byte] + while (let moar = await chunker.getBytes(); moar != []): + data.add moar + return byteutils.toHex(data) + +proc example*(_: type RandomChunker): Future[string] {.async.} = + await RandomChunker.example(3) diff --git a/tests/helpers.nim b/tests/helpers.nim new file mode 100644 index 00000000..a6a6ff44 --- /dev/null +++ b/tests/helpers.nim @@ -0,0 +1,5 @@ +import helpers/multisetup +import helpers/trackers +import helpers/templeveldb + +export multisetup, trackers, templeveldb diff --git a/tests/helpers/multisetup.nim b/tests/helpers/multisetup.nim new file mode 100644 index 00000000..781b0062 --- /dev/null +++ b/tests/helpers/multisetup.nim @@ -0,0 +1,42 @@ +import pkg/chronos + +# Allow multiple setups and teardowns in a test suite +template asyncmultisetup* = + var setups: seq[proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] + var teardowns: seq[ + proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] + + setup: + for setup in setups: + await setup() + + teardown: + for teardown in 
teardowns: + await teardown() + + template setup(setupBody) {.inject, used.} = + setups.add(proc {.async: ( + handleException: true, raises: [AsyncExceptionError]).} = setupBody) + + template teardown(teardownBody) {.inject, used.} = + teardowns.insert(proc {.async: ( + handleException: true, raises: [AsyncExceptionError]).} = teardownBody) + +template multisetup* = + var setups: seq[proc() {.gcsafe.}] + var teardowns: seq[proc() {.gcsafe.}] + + setup: + for setup in setups: + setup() + + teardown: + for teardown in teardowns: + teardown() + + template setup(setupBody) {.inject, used.} = + let setupProc = proc = setupBody + setups.add(setupProc) + + template teardown(teardownBody) {.inject, used.} = + teardowns.insert(proc = teardownBody) diff --git a/tests/helpers/templeveldb.nim b/tests/helpers/templeveldb.nim new file mode 100644 index 00000000..97b40553 --- /dev/null +++ b/tests/helpers/templeveldb.nim @@ -0,0 +1,30 @@ +import os +import std/monotimes +import pkg/datastore +import pkg/chronos +import pkg/questionable/results + +type + TempLevelDb* = ref object + currentPath: string + ds: LevelDbDatastore + +var number = 0 + +proc newDb*(self: TempLevelDb): Datastore = + if self.currentPath.len > 0: + raiseAssert("TempLevelDb already active.") + self.currentPath = getTempDir() / "templeveldb" / $number / $getmonotime() + inc number + createdir(self.currentPath) + self.ds = LevelDbDatastore.new(self.currentPath).tryGet() + return self.ds + +proc destroyDb*(self: TempLevelDb): Future[void] {.async.} = + if self.currentPath.len == 0: + raiseAssert("TempLevelDb not active.") + try: + (await self.ds.close()).tryGet() + finally: + removedir(self.currentPath) + self.currentPath = "" diff --git a/tests/helpers/trackers.nim b/tests/helpers/trackers.nim new file mode 100644 index 00000000..f4b10a2e --- /dev/null +++ b/tests/helpers/trackers.nim @@ -0,0 +1,30 @@ +import pkg/codex/streams/storestream +import std/unittest + +# From lip2p/tests/helpers +const trackerNames = [ 
+ StoreStreamTrackerName + ] + +iterator testTrackers*(extras: openArray[string] = []): TrackerBase = + for name in trackerNames: + let t = getTracker(name) + if not isNil(t): yield t + for name in extras: + let t = getTracker(name) + if not isNil(t): yield t + +proc checkTracker*(name: string) = + var tracker = getTracker(name) + if tracker.isLeaked(): + checkpoint tracker.dump() + fail() + +proc checkTrackers*() = + for tracker in testTrackers(): + if tracker.isLeaked(): + checkpoint tracker.dump() + fail() + try: + GC_fullCollect() + except: discard diff --git a/tests/integration/clioption.nim b/tests/integration/clioption.nim new file mode 100644 index 00000000..5f756d80 --- /dev/null +++ b/tests/integration/clioption.nim @@ -0,0 +1,10 @@ +type + CliOption* = object + key*: string # option key, including `--` + value*: string # option value + +proc `$`*(option: CliOption): string = + var res = option.key + if option.value.len > 0: + res &= "=" & option.value + return res diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim new file mode 100644 index 00000000..d2d78b46 --- /dev/null +++ b/tests/integration/codexclient.nim @@ -0,0 +1,239 @@ +import std/httpclient +import std/strutils + +from pkg/libp2p import Cid, `$`, init +import pkg/stint +import pkg/questionable/results +import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient] +import pkg/codex/logutils +import pkg/codex/rest/json +import pkg/codex/purchasing +import pkg/codex/errors +import pkg/codex/sales/reservations + +export purchasing + +type CodexClient* = ref object + http: HttpClient + baseurl: string + session: HttpSessionRef + +type CodexClientError* = object of CatchableError + +proc new*(_: type CodexClient, baseurl: string): CodexClient = + CodexClient( + http: newHttpClient(), + baseurl: baseurl, + session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}) + ) + +proc info*(client: CodexClient): ?!JsonNode = + let url = client.baseurl & "/debug/info" + 
JsonNode.parse( client.http.getContent(url) ) + +proc setLogLevel*(client: CodexClient, level: string) = + let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level + let headers = newHttpHeaders({"Content-Type": "text/plain"}) + let response = client.http.request(url, httpMethod=HttpPost, headers=headers) + assert response.status == "200 OK" + +proc upload*(client: CodexClient, contents: string): ?!Cid = + let response = client.http.post(client.baseurl & "/data", contents) + assert response.status == "200 OK" + Cid.init(response.body).mapFailure + +proc download*(client: CodexClient, cid: Cid, local = false): ?!string = + let + response = client.http.get( + client.baseurl & "/data/" & $cid & + (if local: "" else: "/network")) + + if response.status != "200 OK": + return failure(response.status) + + success response.body + +proc downloadBytes*( + client: CodexClient, + cid: Cid, + local = false): Future[?!seq[byte]] {.async.} = + + let uri = parseUri( + client.baseurl & "/data/" & $cid & + (if local: "" else: "/network") + ) + + let (status, bytes) = await client.session.fetch(uri) + + if status != 200: + return failure("fetch failed with status " & $status) + + success bytes + +proc list*(client: CodexClient): ?!RestContentList = + let url = client.baseurl & "/data" + let response = client.http.get(url) + + if response.status != "200 OK": + return failure(response.status) + + RestContentList.fromJson(response.body) + +proc space*(client: CodexClient): ?!RestRepoStore = + let url = client.baseurl & "/space" + let response = client.http.get(url) + + if response.status != "200 OK": + return failure(response.status) + + RestRepoStore.fromJson(response.body) + +proc requestStorageRaw*( + client: CodexClient, + cid: Cid, + duration: UInt256, + reward: UInt256, + proofProbability: UInt256, + collateral: UInt256, + expiry: uint = 0, + nodes: uint = 2, + tolerance: uint = 0 +): Response = + + ## Call request storage REST endpoint + ## + let url = 
client.baseurl & "/storage/request/" & $cid + let json = %*{ + "duration": duration, + "reward": reward, + "proofProbability": proofProbability, + "collateral": collateral, + "nodes": nodes, + "tolerance": tolerance + } + + if expiry != 0: + json["expiry"] = %($expiry) + + return client.http.post(url, $json) + +proc requestStorage*( + client: CodexClient, + cid: Cid, + duration: UInt256, + reward: UInt256, + proofProbability: UInt256, + expiry: uint, + collateral: UInt256, + nodes: uint = 2, + tolerance: uint = 0 +): ?!PurchaseId = + ## Call request storage REST endpoint + ## + let response = client.requestStorageRaw(cid, duration, reward, proofProbability, collateral, expiry, nodes, tolerance) + if response.status != "200 OK": + doAssert(false, response.body) + PurchaseId.fromHex(response.body).catch + +proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = + let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex + try: + let body = client.http.getContent(url) + return RestPurchase.fromJson(body) + except CatchableError as e: + return failure e.msg + +proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = + let url = client.baseurl & "/sales/slots/" & slotId.toHex + try: + let body = client.http.getContent(url) + return RestSalesAgent.fromJson(body) + except CatchableError as e: + return failure e.msg + +proc getSlots*(client: CodexClient): ?!seq[Slot] = + let url = client.baseurl & "/sales/slots" + let body = client.http.getContent(url) + seq[Slot].fromJson(body) + +proc postAvailability*( + client: CodexClient, + totalSize, duration, minPrice, maxCollateral: UInt256 +): ?!Availability = + ## Post sales availability endpoint + ## + let url = client.baseurl & "/sales/availability" + let json = %*{ + "totalSize": totalSize, + "duration": duration, + "minPrice": minPrice, + "maxCollateral": maxCollateral, + } + let response = client.http.post(url, $json) + doAssert response.status == "201 Created", 
"expected 201 Created, got " & response.status & ", body: " & response.body + Availability.fromJson(response.body) + +proc patchAvailabilityRaw*( + client: CodexClient, + availabilityId: AvailabilityId, + totalSize, freeSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none +): Response = + ## Updates availability + ## + let url = client.baseurl & "/sales/availability/" & $availabilityId + + # TODO: Optionalize macro does not keep `serialize` pragmas so we can't use `Optionalize(RestAvailability)` here. + var json = %*{} + + if totalSize =? totalSize: + json["totalSize"] = %totalSize + + if freeSize =? freeSize: + json["freeSize"] = %freeSize + + if duration =? duration: + json["duration"] = %duration + + if minPrice =? minPrice: + json["minPrice"] = %minPrice + + if maxCollateral =? maxCollateral: + json["maxCollateral"] = %maxCollateral + + client.http.patch(url, $json) + +proc patchAvailability*( + client: CodexClient, + availabilityId: AvailabilityId, + totalSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none +): void = + let response = client.patchAvailabilityRaw(availabilityId, totalSize=totalSize, duration=duration, minPrice=minPrice, maxCollateral=maxCollateral) + doAssert response.status == "200 OK", "expected 200 OK, got " & response.status + +proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = + ## Call sales availability REST endpoint + let url = client.baseurl & "/sales/availability" + let body = client.http.getContent(url) + seq[Availability].fromJson(body) + +proc getAvailabilityReservations*(client: CodexClient, availabilityId: AvailabilityId): ?!seq[Reservation] = + ## Retrieves Availability's Reservations + let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" + let body = client.http.getContent(url) + seq[Reservation].fromJson(body) + +proc close*(client: CodexClient) = + client.http.close() + +proc restart*(client: CodexClient) = + client.http.close() + client.http = 
newHttpClient() + +proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = + client.getPurchase(id).option.?state == some state + +proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = + client.getSalesAgent(id).option.?state == some state + +proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = + return client.getPurchase(id).option.?requestId diff --git a/tests/integration/codexconfig.nim b/tests/integration/codexconfig.nim new file mode 100644 index 00000000..5f15331b --- /dev/null +++ b/tests/integration/codexconfig.nim @@ -0,0 +1,295 @@ +import std/options +import std/os +import std/sequtils +import std/strutils +import std/sugar +import std/tables +from pkg/chronicles import LogLevel +import pkg/codex/conf +import pkg/codex/units +import pkg/confutils +import pkg/confutils/defs +import libp2p except setup +import pkg/questionable +import ./clioption + +export clioption +export confutils + +type + CodexConfigs* = object + configs*: seq[CodexConfig] + CodexConfig* = object + cliOptions: Table[StartUpCmd, Table[string, CliOption]] + cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]] + debugEnabled*: bool + CodexConfigError* = object of CatchableError + +proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} + +proc raiseCodexConfigError(msg: string) {.raises: [CodexConfigError].} = + raise newException(CodexConfigError, msg) + +template convertError(body) = + try: + body + except CatchableError as e: + raiseCodexConfigError e.msg + +proc init*(_: type CodexConfigs, nodes = 1): CodexConfigs {.raises: [].} = + CodexConfigs(configs: newSeq[CodexConfig](nodes)) + +func nodes*(self: CodexConfigs): int = + self.configs.len + +proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} = + if idx notin 0.. 
0: + ": " & msg + else: "" + + try: + return CodexConf.load(cmdLine = config.cliArgs, quitOnFailure = false) + except ConfigurationError as e: + raiseCodexConfigError msg & e.msg.postFix + except Exception as e: + ## TODO: remove once proper exception handling added to nim-confutils + raiseCodexConfigError msg & e.msg.postFix + +proc addCliOption*( + config: var CodexConfig, + group = PersistenceCmd.noCmd, + cliOption: CliOption) {.raises: [CodexConfigError].} = + + var options = config.cliPersistenceOptions.getOrDefault(group) + options[cliOption.key] = cliOption # overwrite if already exists + config.cliPersistenceOptions[group] = options + discard config.buildConfig("Invalid cli arg " & $cliOption) + +proc addCliOption*( + config: var CodexConfig, + group = PersistenceCmd.noCmd, + key: string, value = "") {.raises: [CodexConfigError].} = + + config.addCliOption(group, CliOption(key: key, value: value)) + +proc addCliOption*( + config: var CodexConfig, + group = StartUpCmd.noCmd, + cliOption: CliOption) {.raises: [CodexConfigError].} = + + var options = config.cliOptions.getOrDefault(group) + options[cliOption.key] = cliOption # overwrite if already exists + config.cliOptions[group] = options + discard config.buildConfig("Invalid cli arg " & $cliOption) + +proc addCliOption*( + config: var CodexConfig, + group = StartUpCmd.noCmd, + key: string, value = "") {.raises: [CodexConfigError].} = + + config.addCliOption(group, CliOption(key: key, value: value)) + +proc addCliOption*( + config: var CodexConfig, + cliOption: CliOption) {.raises: [CodexConfigError].} = + + config.addCliOption(StartUpCmd.noCmd, cliOption) + +proc addCliOption*( + config: var CodexConfig, + key: string, value = "") {.raises: [CodexConfigError].} = + + config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value)) + +proc cliArgs*( + config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} = + ## converts CodexConfig cli options and command groups in a sequence of 
args + ## and filters out cli options by node index if provided in the CliOption + var args: seq[string] = @[] + + convertError: + for cmd in StartUpCmd: + if config.cliOptions.hasKey(cmd): + if cmd != StartUpCmd.noCmd: + args.add $cmd + var opts = config.cliOptions[cmd].values.toSeq + args = args.concat( opts.map(o => $o) ) + + for cmd in PersistenceCmd: + if config.cliPersistenceOptions.hasKey(cmd): + if cmd != PersistenceCmd.noCmd: + args.add $cmd + var opts = config.cliPersistenceOptions[cmd].values.toSeq + args = args.concat( opts.map(o => $o) ) + + return args + +proc logFile*(config: CodexConfig): ?string {.raises: [CodexConfigError].} = + let built = config.buildConfig("Invalid codex config cli params") + built.logFile + +proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} = + convertError: + let built = config.buildConfig("Invalid codex config cli params") + return parseEnum[LogLevel](built.logLevel.toUpperAscii) + +proc debug*( + self: CodexConfigs, + idx: int, + enabled = true): CodexConfigs {.raises: [CodexConfigError].} = + ## output log in stdout for a specific node in the group + + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].debugEnabled = enabled + return startConfig + +proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} = + ## output log in stdout for all nodes in group + var startConfig = self + for config in startConfig.configs.mitems: + config.debugEnabled = enabled + return startConfig + +proc withLogFile*( + self: CodexConfigs, + idx: int): CodexConfigs {.raises: [CodexConfigError].} = + + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--log-file", "") + return startConfig + +proc withLogFile*( + self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} = + ## typically called from test, sets config such that a log file should be + ## created + var startConfig = self + for config in startConfig.configs.mitems: + 
config.addCliOption("--log-file", "") + return startConfig + +proc withLogFile*( + self: var CodexConfig, + logFile: string) {.raises: [CodexConfigError].} = #: CodexConfigs = + ## typically called internally from the test suite, sets a log file path to + ## be created during the test run, for a specified node in the group + # var config = self + self.addCliOption("--log-file", logFile) + # return startConfig + +proc withLogLevel*( + self: CodexConfig, + level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} = + + var config = self + config.addCliOption("--log-level", $level) + return config + +proc withLogLevel*( + self: CodexConfigs, + idx: int, + level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} = + + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--log-level", $level) + return startConfig + +proc withLogLevel*( + self: CodexConfigs, + level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} = + + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--log-level", $level) + return startConfig + +proc withSimulateProofFailures*( + self: CodexConfigs, + idx: int, + failEveryNProofs: int +): CodexConfigs {.raises: [CodexConfigError].} = + + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption( + StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs) + return startConfig + +proc withSimulateProofFailures*( + self: CodexConfigs, + failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} = + + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption( + StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs) + return startConfig + +proc logLevelWithTopics( + config: CodexConfig, + topics: varargs[string]): string {.raises: [CodexConfigError].} = + + convertError: + var logLevel = LogLevel.INFO + let built = config.buildConfig("Invalid 
codex config cli params") + logLevel = parseEnum[LogLevel](built.logLevel.toUpperAscii) + let level = $logLevel & ";TRACE: " & topics.join(",") + return level + +proc withLogTopics*( + self: CodexConfigs, + idx: int, + topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} = + + self.checkBounds idx + + convertError: + let config = self.configs[idx] + let level = config.logLevelWithTopics(topics) + var startConfig = self + return startConfig.withLogLevel(idx, level) + +proc withLogTopics*( + self: CodexConfigs, + topics: varargs[string] +): CodexConfigs {.raises: [CodexConfigError].} = + + var startConfig = self + for config in startConfig.configs.mitems: + let level = config.logLevelWithTopics(topics) + config = config.withLogLevel(level) + return startConfig + +proc withStorageQuota*( + self: CodexConfigs, + idx: int, + quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} = + + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--storage-quota", $quota) + return startConfig + +proc withStorageQuota*( + self: CodexConfigs, + quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} = + + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--storage-quota", $quota) + return startConfig diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim new file mode 100644 index 00000000..ce633434 --- /dev/null +++ b/tests/integration/codexprocess.nim @@ -0,0 +1,76 @@ +import pkg/questionable +import pkg/questionable/results +import pkg/confutils +import pkg/chronicles +import pkg/chronos/asyncproc +import pkg/ethers +import pkg/libp2p +import std/os +import std/strutils +import codex/conf +import ./codexclient +import ./nodeprocess + +export codexclient +export chronicles +export nodeprocess + +logScope: + topics = "integration testing codex process" + +type + CodexProcess* = ref object of NodeProcess + client: ?CodexClient + +method 
workingDir(node: CodexProcess): string = + return currentSourcePath() / ".." / ".." / ".." + +method executable(node: CodexProcess): string = + return "build" / "codex" + +method startedOutput(node: CodexProcess): string = + return "REST service started" + +method processOptions(node: CodexProcess): set[AsyncProcessOption] = + return {AsyncProcessOption.StdErrToStdOut} + +method outputLineEndings(node: CodexProcess): string = + return "\n" + +method onOutputLineCaptured(node: CodexProcess, line: string) = + discard + +proc dataDir(node: CodexProcess): string = + let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false) + return config.dataDir.string + +proc ethAccount*(node: CodexProcess): Address = + let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false) + without ethAccount =? config.ethAccount: + raiseAssert "eth account not set" + return Address(ethAccount) + +proc apiUrl*(node: CodexProcess): string = + let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false) + return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1" + +proc client*(node: CodexProcess): CodexClient = + if client =? node.client: + return client + let client = CodexClient.new(node.apiUrl) + node.client = some client + return client + +method stop*(node: CodexProcess) {.async.} = + logScope: + nodeName = node.name + + await procCall NodeProcess(node).stop() + + trace "stopping codex client" + if client =? 
node.client: + client.close() + node.client = none CodexClient + +method removeDataDir*(node: CodexProcess) = + removeDir(node.dataDir) diff --git a/tests/integration/hardhatconfig.nim b/tests/integration/hardhatconfig.nim new file mode 100644 index 00000000..fbd04fe8 --- /dev/null +++ b/tests/integration/hardhatconfig.nim @@ -0,0 +1,15 @@ +type + HardhatConfig* = object + logFile*: bool + debugEnabled*: bool + +proc debug*(self: HardhatConfig, enabled = true): HardhatConfig = + ## output log in stdout + var config = self + config.debugEnabled = enabled + return config + +proc withLogFile*(self: HardhatConfig, logToFile: bool = true): HardhatConfig = + var config = self + config.logFile = logToFile + return config diff --git a/tests/integration/hardhatprocess.nim b/tests/integration/hardhatprocess.nim new file mode 100644 index 00000000..6cfab47d --- /dev/null +++ b/tests/integration/hardhatprocess.nim @@ -0,0 +1,131 @@ +import pkg/questionable +import pkg/questionable/results +import pkg/confutils +import pkg/chronicles +import pkg/chronos +import pkg/chronos/asyncproc +import pkg/stew/io2 +import std/os +import std/sets +import std/sequtils +import std/strutils +import pkg/codex/conf +import pkg/codex/utils/trackedfutures +import ./codexclient +import ./nodeprocess + +export codexclient +export chronicles + +logScope: + topics = "integration testing hardhat process" + nodeName = "hardhat" + +type + HardhatProcess* = ref object of NodeProcess + logFile: ?IoHandle + +method workingDir(node: HardhatProcess): string = + return currentSourcePath() / ".." / ".." / ".." 
/ "vendor" / "codex-contracts-eth" + +method executable(node: HardhatProcess): string = + return "node_modules" / ".bin" / "hardhat" + +method startedOutput(node: HardhatProcess): string = + return "Started HTTP and WebSocket JSON-RPC server at" + +method processOptions(node: HardhatProcess): set[AsyncProcessOption] = + return {} + +method outputLineEndings(node: HardhatProcess): string = + return "\n" + +proc openLogFile(node: HardhatProcess, logFilePath: string): IoHandle = + let logFileHandle = openFile( + logFilePath, + {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate} + ) + + without fileHandle =? logFileHandle: + fatal "failed to open log file", + path = logFilePath, + errorCode = $logFileHandle.error + + raiseAssert "failed to open log file, aborting" + + return fileHandle + +method start*(node: HardhatProcess) {.async.} = + + let poptions = node.processOptions + {AsyncProcessOption.StdErrToStdOut} + trace "starting node", + args = node.arguments, + executable = node.executable, + workingDir = node.workingDir, + processOptions = poptions + + try: + node.process = await startProcess( + node.executable, + node.workingDir, + @["node", "--export", "deployment-localhost.json"].concat(node.arguments), + options = poptions, + stdoutHandle = AsyncProcess.Pipe + ) + except CancelledError as error: + raise error + except CatchableError as e: + error "failed to start hardhat process", error = e.msg + +proc startNode*( + _: type HardhatProcess, + args: seq[string], + debug: string | bool = false, + name: string +): Future[HardhatProcess] {.async.} = + + var logFilePath = "" + + var arguments = newSeq[string]() + for arg in args: + if arg.contains "--log-file=": + logFilePath = arg.split("=")[1] + else: + arguments.add arg + + trace "starting hardhat node", arguments + ## Starts a Hardhat Node with the specified arguments. + ## Set debug to 'true' to see output of the node. 
+ let hardhat = HardhatProcess( + arguments: arguments, + debug: ($debug != "false"), + trackedFutures: TrackedFutures.new(), + name: "hardhat" + ) + + await hardhat.start() + + if logFilePath != "": + hardhat.logFile = some hardhat.openLogFile(logFilePath) + + return hardhat + +method onOutputLineCaptured(node: HardhatProcess, line: string) = + without logFile =? node.logFile: + return + + if error =? logFile.writeFile(line & "\n").errorOption: + error "failed to write to hardhat file", errorCode = error + discard logFile.closeFile() + node.logFile = none IoHandle + +method stop*(node: HardhatProcess) {.async.} = + # terminate the process + await procCall NodeProcess(node).stop() + + if logFile =? node.logFile: + trace "closing hardhat log file" + discard logFile.closeFile() + +method removeDataDir*(node: HardhatProcess) = + discard diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim new file mode 100644 index 00000000..2b81bdd8 --- /dev/null +++ b/tests/integration/marketplacesuite.nim @@ -0,0 +1,92 @@ +import pkg/chronos +import pkg/ethers/erc20 +from pkg/libp2p import Cid +import pkg/codex/contracts/marketplace as mp +import pkg/codex/periods +import pkg/codex/utils/json +import ./multinodes +import ../contracts/time +import ../contracts/deployment + +export mp +export multinodes + +template marketplacesuite*(name: string, body: untyped) = + + multinodesuite name: + + var marketplace {.inject, used.}: Marketplace + var period: uint64 + var periodicity: Periodicity + var token {.inject, used.}: Erc20Token + + proc getCurrentPeriod(): Future[Period] {.async.} = + return periodicity.periodOf(await ethProvider.currentTime()) + + proc advanceToNextPeriod() {.async.} = + let periodicity = Periodicity(seconds: period.u256) + let currentTime = await ethProvider.currentTime() + let currentPeriod = periodicity.periodOf(currentTime) + let endOfPeriod = periodicity.periodEnd(currentPeriod) + await 
ethProvider.advanceTimeTo(endOfPeriod + 1) + + template eventuallyP(condition: untyped, finalPeriod: Period): bool = + + proc eventuallyP: Future[bool] {.async.} = + while( + let currentPeriod = await getCurrentPeriod(); + currentPeriod <= finalPeriod + ): + if condition: + return true + await sleepAsync(1.millis) + return condition + + await eventuallyP() + + proc periods(p: int): uint64 = + p.uint64 * period + + proc createAvailabilities(datasetSize: int, duration: uint64) = + # post availability to each provider + for i in 0.. //_.log + + var logDir = currentSourcePath.parentDir() / + "logs" / + sanitize($starttime & "__" & name) / + sanitize($currentTestName) + createDir(logDir) + + var fn = $role + if idx =? index: + fn &= "_" & $idx + fn &= ".log" + + let fileName = logDir / fn + return fileName + + proc newHardhatProcess( + config: HardhatConfig, + role: Role + ): Future[NodeProcess] {.async.} = + + var args: seq[string] = @[] + if config.logFile: + let updatedLogFile = getLogFile(role, none int) + args.add "--log-file=" & updatedLogFile + + let node = await HardhatProcess.startNode(args, config.debugEnabled, "hardhat") + try: + await node.waitUntilStarted() + except NodeProcessError as e: + raiseMultiNodeSuiteError "hardhat node not started: " & e.msg + + trace "hardhat node started" + return node + + proc newCodexProcess(roleIdx: int, + conf: CodexConfig, + role: Role + ): Future[NodeProcess] {.async.} = + + let nodeIdx = running.len + var config = conf + + if nodeIdx > accounts.len - 1: + raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx & + ", not enough eth accounts." 
+ + let datadir = getTempDir() / "Codex" / + sanitize($starttime) / + sanitize($role & "_" & $roleIdx) + + try: + if config.logFile.isSome: + let updatedLogFile = getLogFile(role, some roleIdx) + config.withLogFile(updatedLogFile) + + config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx)) + config.addCliOption("--data-dir", datadir) + config.addCliOption("--nat", "127.0.0.1") + config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0") + config.addCliOption("--disc-ip", "127.0.0.1") + config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx)) + + except CodexConfigError as e: + raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg + + let node = await CodexProcess.startNode( + config.cliArgs, + config.debugEnabled, + $role & $roleIdx + ) + + try: + await node.waitUntilStarted() + trace "node started", nodeName = $role & $roleIdx + except NodeProcessError as e: + raiseMultiNodeSuiteError "node not started, error: " & e.msg + + return node + + proc hardhat: HardhatProcess = + for r in running: + if r.role == Role.Hardhat: + return HardhatProcess(r.node) + return nil + + proc clients: seq[CodexProcess] {.used.} = + return collect: + for r in running: + if r.role == Role.Client: + CodexProcess(r.node) + + proc providers: seq[CodexProcess] {.used.} = + return collect: + for r in running: + if r.role == Role.Provider: + CodexProcess(r.node) + + proc validators: seq[CodexProcess] {.used.} = + return collect: + for r in running: + if r.role == Role.Validator: + CodexProcess(r.node) + + proc startHardhatNode(config: HardhatConfig): Future[NodeProcess] {.async.} = + return await newHardhatProcess(config, Role.Hardhat) + + proc startClientNode(conf: CodexConfig): Future[NodeProcess] {.async.} = + let clientIdx = clients().len + var config = conf + config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + return await newCodexProcess(clientIdx, config, Role.Client) + + proc startProviderNode(conf: 
CodexConfig): Future[NodeProcess] {.async.} = + let providerIdx = providers().len + var config = conf + config.addCliOption("--bootstrap-node", bootstrap) + config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs") + config.addCliOption(PersistenceCmd.prover, "--circom-wasm", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm") + config.addCliOption(PersistenceCmd.prover, "--circom-zkey", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey") + + return await newCodexProcess(providerIdx, config, Role.Provider) + + proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} = + let validatorIdx = validators().len + var config = conf + config.addCliOption("--bootstrap-node", bootstrap) + config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + config.addCliOption(StartUpCmd.persistence, "--validator") + + return await newCodexProcess(validatorIdx, config, Role.Validator) + + proc teardownImpl() {.async.} = + for nodes in @[validators(), clients(), providers()]: + for node in nodes: + await node.stop() # also stops rest client + node.removeDataDir() + + # if hardhat was started in the test, kill the node + # otherwise revert the snapshot taken in the test setup + let hardhat = hardhat() + if not hardhat.isNil: + await hardhat.stop() + else: + discard await send(ethProvider, "evm_revert", @[snapshot]) + + running = @[] + + template failAndTeardownOnError(message: string, tryBody: untyped) = + try: + tryBody + except CatchableError as er: + fatal message, error=er.msg + echo "[FATAL] ", message, ": ", er.msg + await teardownImpl() + when declared(teardownAllIMPL): + teardownAllIMPL() + fail() + quit(1) + + setup: + if var conf =? 
nodeConfigs.hardhat: + try: + let node = await startHardhatNode(conf) + running.add RunningNode(role: Role.Hardhat, node: node) + except CatchableError as e: + echo "failed to start hardhat node" + fail() + quit(1) + + try: + # Workaround for https://github.com/NomicFoundation/hardhat/issues/2053 + # Do not use websockets, but use http and polling to stop subscriptions + # from being removed after 5 minutes + ethProvider = JsonRpcProvider.new("http://localhost:8545") + # if hardhat was NOT started by the test, take a snapshot so it can be + # reverted in the test teardown + if nodeConfigs.hardhat.isNone: + snapshot = await send(ethProvider, "evm_snapshot") + accounts = await ethProvider.listAccounts() + except CatchableError as e: + echo "Hardhat not running. Run hardhat manually " & + "before executing tests, or include a " & + "HardhatConfig in the test setup." + fail() + quit(1) + + if var clients =? nodeConfigs.clients: + failAndTeardownOnError "failed to start client nodes": + for config in clients.configs: + let node = await startClientNode(config) + running.add RunningNode( + role: Role.Client, + node: node + ) + if clients().len == 1: + without ninfo =? CodexProcess(node).client.info(): + # raise CatchableError instead of Defect (with .get or !) so we + # can gracefully shutdown and prevent zombies + raiseMultiNodeSuiteError "Failed to get node info" + bootstrap = ninfo["spr"].getStr() + + if var providers =? nodeConfigs.providers: + failAndTeardownOnError "failed to start provider nodes": + for config in providers.configs.mitems: + let node = await startProviderNode(config) + running.add RunningNode( + role: Role.Provider, + node: node + ) + + if var validators =? 
nodeConfigs.validators: + failAndTeardownOnError "failed to start validator nodes": + for config in validators.configs.mitems: + let node = await startValidatorNode(config) + running.add RunningNode( + role: Role.Validator, + node: node + ) + + # ensure that we have a recent block with a fresh timestamp + discard await send(ethProvider, "evm_mine") + + teardown: + await teardownImpl() + + body diff --git a/tests/integration/nodeconfig.nim b/tests/integration/nodeconfig.nim new file mode 100644 index 00000000..d6adb80f --- /dev/null +++ b/tests/integration/nodeconfig.nim @@ -0,0 +1,34 @@ +import pkg/chronicles +import pkg/questionable + +export chronicles + +type + NodeConfig* = ref object of RootObj + logFile*: bool + logLevel*: ?LogLevel + debugEnabled*: bool + +proc debug*[T: NodeConfig](config: T, enabled = true): T = + ## output log in stdout + var startConfig = config + startConfig.debugEnabled = enabled + return startConfig + +proc withLogFile*[T: NodeConfig]( + config: T, + logToFile: bool = true +): T = + + var startConfig = config + startConfig.logFile = logToFile + return startConfig + +proc withLogLevel*[T: NodeConfig]( + config: NodeConfig, + level: LogLevel +): T = + + var startConfig = config + startConfig.logLevel = some level + return startConfig diff --git a/tests/integration/nodeconfigs.nim b/tests/integration/nodeconfigs.nim new file mode 100644 index 00000000..56309006 --- /dev/null +++ b/tests/integration/nodeconfigs.nim @@ -0,0 +1,11 @@ +import pkg/questionable +import ./codexconfig +import ./hardhatconfig + +type + NodeConfigs* = object + clients*: ?CodexConfigs + providers*: ?CodexConfigs + validators*: ?CodexConfigs + hardhat*: ?HardhatConfig + diff --git a/tests/integration/nodeprocess.nim b/tests/integration/nodeprocess.nim new file mode 100644 index 00000000..97f4507f --- /dev/null +++ b/tests/integration/nodeprocess.nim @@ -0,0 +1,175 @@ +import pkg/questionable +import pkg/questionable/results +import pkg/confutils +import 
pkg/chronicles +import pkg/chronos/asyncproc +import pkg/libp2p +import std/os +import std/strutils +import codex/conf +import codex/utils/exceptions +import codex/utils/trackedfutures +import ./codexclient + +export codexclient +export chronicles + +logScope: + topics = "integration testing node process" + +type + NodeProcess* = ref object of RootObj + process*: AsyncProcessRef + arguments*: seq[string] + debug: bool + trackedFutures*: TrackedFutures + name*: string + NodeProcessError* = object of CatchableError + +method workingDir(node: NodeProcess): string {.base.} = + raiseAssert "not implemented" + +method executable(node: NodeProcess): string {.base.} = + raiseAssert "not implemented" + +method startedOutput(node: NodeProcess): string {.base.} = + raiseAssert "not implemented" + +method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base.} = + raiseAssert "not implemented" + +method outputLineEndings(node: NodeProcess): string {.base.} = + raiseAssert "not implemented" + +method onOutputLineCaptured(node: NodeProcess, line: string) {.base.} = + raiseAssert "not implemented" + +method start*(node: NodeProcess) {.base, async.} = + logScope: + nodeName = node.name + + let poptions = node.processOptions + {AsyncProcessOption.StdErrToStdOut} + trace "starting node", + args = node.arguments, + executable = node.executable, + workingDir = node.workingDir, + processOptions = poptions + + try: + if node.debug: + echo "starting codex node with args: ", node.arguments.join(" ") + node.process = await startProcess( + node.executable, + node.workingDir, + node.arguments, + options = poptions, + stdoutHandle = AsyncProcess.Pipe + ) + except CancelledError as error: + raise error + except CatchableError as e: + error "failed to start node process", error = e.msg + +proc captureOutput( + node: NodeProcess, + output: string, + started: Future[void] +) {.async.} = + + logScope: + nodeName = node.name + + trace "waiting for output", output + + let stream = 
node.process.stdoutStream + + try: + while node.process.running.option == some true: + while(let line = await stream.readLine(0, node.outputLineEndings); line != ""): + if node.debug: + # would be nice if chronicles could parse and display with colors + echo line + + if not started.isNil and not started.finished and line.contains(output): + started.complete() + + node.onOutputLineCaptured(line) + + await sleepAsync(1.millis) + await sleepAsync(1.millis) + + except AsyncStreamReadError as e: + error "error reading output stream", error = e.msgDetail + +proc startNode*[T: NodeProcess]( + _: type T, + args: seq[string], + debug: string | bool = false, + name: string +): Future[T] {.async.} = + + ## Starts a Codex Node with the specified arguments. + ## Set debug to 'true' to see output of the node. + let node = T( + arguments: @args, + debug: ($debug != "false"), + trackedFutures: TrackedFutures.new(), + name: name + ) + await node.start() + return node + +method stop*(node: NodeProcess) {.base, async.} = + logScope: + nodeName = node.name + + await node.trackedFutures.cancelTracked() + if node.process != nil: + try: + trace "terminating node process..." + if errCode =? 
node.process.terminate().errorOption: + error "failed to terminate process", errCode + + trace "waiting for node process to exit" + let exitCode = await node.process.waitForExit(3.seconds) + if exitCode > 0: + error "failed to exit process, check for zombies", exitCode + + trace "closing node process' streams" + await node.process.closeWait() + except CancelledError as error: + raise error + except CatchableError as e: + error "error stopping node process", error = e.msg + + finally: + node.process = nil + + trace "node stopped" + +proc waitUntilStarted*(node: NodeProcess) {.async.} = + logScope: + nodeName = node.name + + trace "waiting until node started" + + let started = newFuture[void]() + try: + discard node.captureOutput(node.startedOutput, started).track(node) + await started.wait(35.seconds) # allow enough time for proof generation + except AsyncTimeoutError: + # attempt graceful shutdown in case node was partially started, prevent + # zombies + await node.stop() + # raise error here so that all nodes (not just this one) can be + # shutdown gracefully + raise newException(NodeProcessError, "node did not output '" & + node.startedOutput & "'") + +proc restart*(node: NodeProcess) {.async.} = + await node.stop() + await node.start() + await node.waitUntilStarted() + +method removeDataDir*(node: NodeProcess) {.base.} = + raiseAssert "[removeDataDir] not implemented" diff --git a/tests/integration/nodes.nim b/tests/integration/nodes.nim index 0c8ea4d3..a60a55c7 100644 --- a/tests/integration/nodes.nim +++ b/tests/integration/nodes.nim @@ -2,47 +2,90 @@ import std/osproc import std/os import std/streams import std/strutils +import pkg/codex/conf +import pkg/codex/logutils +import pkg/confutils +import pkg/libp2p +import pkg/questionable +import ./codexclient + +export codexclient const workingDir = currentSourcePath() / ".." / ".." / ".." 
const executable = "build" / "codex" -type NodeProcess* = ref object - process: Process - arguments: seq[string] - debug: bool +type + NodeProcess* = ref object + process: Process + arguments: seq[string] + debug: bool + client: ?CodexClient proc start(node: NodeProcess) = if node.debug: - node.process = startProcess( + node.process = osproc.startProcess( executable, workingDir, node.arguments, options={poParentStreams} ) - sleep(1000) else: - node.process = startProcess( + node.process = osproc.startProcess( executable, workingDir, node.arguments ) - for line in node.process.outputStream.lines: - if line.contains("Started codex node"): - break -proc startNode*(args: openArray[string], debug = false): NodeProcess = +proc waitUntilOutput*(node: NodeProcess, output: string) = + if node.debug: + raiseAssert "cannot read node output when in debug mode" + for line in node.process.outputStream.lines: + if line.contains(output): + return + raiseAssert "node did not output '" & output & "'" + +proc waitUntilStarted*(node: NodeProcess) = + if node.debug: + sleep(10_000) + else: + node.waitUntilOutput("Started codex node") + +proc startNode*(args: openArray[string], debug: string | bool = false): NodeProcess = ## Starts a Codex Node with the specified arguments. ## Set debug to 'true' to see output of the node. - let node = NodeProcess(arguments: @args, debug: debug) + let node = NodeProcess(arguments: @args, debug: ($debug != "false")) node.start() node +proc dataDir(node: NodeProcess): string = + let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false) + config.dataDir.string + +proc apiUrl(node: NodeProcess): string = + let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false) + "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1" + +proc client*(node: NodeProcess): CodexClient = + if client =? 
node.client: + return client + let client = CodexClient.new(node.apiUrl) + node.client = some client + client + proc stop*(node: NodeProcess) = - let process = node.process - process.terminate() - discard process.waitForExit(timeout=5_000) - process.close() + if node.process != nil: + node.process.terminate() + discard node.process.waitForExit(timeout=5_000) + node.process.close() + node.process = nil + if client =? node.client: + node.client = none CodexClient + client.close() proc restart*(node: NodeProcess) = node.stop() node.start() + node.waitUntilStarted() + +proc removeDataDir*(node: NodeProcess) = + removeDir(node.dataDir) diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim new file mode 100644 index 00000000..fc2bd0bd --- /dev/null +++ b/tests/integration/testblockexpiration.nim @@ -0,0 +1,85 @@ +import std/os +import std/httpclient +import std/strutils +from std/net import TimeoutError + +import pkg/chronos +import ../ethertest +import ./nodes + +ethersuite "Node block expiration tests": + var node: NodeProcess + var baseurl: string + + let dataDir = getTempDir() / "Codex1" + let content = "test file content" + + setup: + baseurl = "http://localhost:8080/api/codex/v1" + + teardown: + node.stop() + + dataDir.removeDir() + + proc startTestNode(blockTtlSeconds: int) = + node = startNode([ + "--api-port=8080", + "--data-dir=" & dataDir, + "--nat=127.0.0.1", + "--listen-addrs=/ip4/127.0.0.1/tcp/0", + "--disc-ip=127.0.0.1", + "--disc-port=8090", + "--block-ttl=" & $blockTtlSeconds, + "--block-mi=1", + "--block-mn=10" + ], debug = false) + node.waitUntilStarted() + + proc uploadTestFile(): string = + let client = newHttpClient() + let uploadUrl = baseurl & "/data" + let uploadResponse = client.post(uploadUrl, content) + check uploadResponse.status == "200 OK" + client.close() + uploadResponse.body + + proc downloadTestFile(contentId: string, local = false): Response = + let client = newHttpClient(timeout=3000) + let 
downloadUrl = baseurl & "/data/" & + contentId & (if local: "" else: "/network") + + let content = client.get(downloadUrl) + client.close() + content + + proc hasFile(contentId: string): bool = + let client = newHttpClient(timeout=3000) + let dataLocalUrl = baseurl & "/data/" & contentId + let content = client.get(dataLocalUrl) + client.close() + content.code == Http200 + + test "node retains not-expired file": + startTestNode(blockTtlSeconds = 10) + + let contentId = uploadTestFile() + + await sleepAsync(2.seconds) + + let response = downloadTestFile(contentId, local = true) + check: + hasFile(contentId) + response.status == "200 OK" + response.body == content + + test "node deletes expired file": + startTestNode(blockTtlSeconds = 1) + + let contentId = uploadTestFile() + + await sleepAsync(3.seconds) + + check: + not hasFile(contentId) + downloadTestFile(contentId, local = true).code == Http404 diff --git a/tests/integration/testcli.nim b/tests/integration/testcli.nim new file mode 100644 index 00000000..ee0aabe0 --- /dev/null +++ b/tests/integration/testcli.nim @@ -0,0 +1,50 @@ +import std/unittest +import std/tempfiles +import codex/conf +import codex/utils/fileutils +import ./nodes + +suite "Command line interface": + + let key = "4242424242424242424242424242424242424242424242424242424242424242" + + test "complains when persistence is enabled without ethereum account": + let node = startNode(@[ + "persistence" + ]) + node.waitUntilOutput("Persistence enabled, but no Ethereum account was set") + node.stop() + + test "complains when ethereum private key file has wrong permissions": + let unsafeKeyFile = genTempPath("", "") + discard unsafeKeyFile.writeFile(key, 0o666) + let node = startNode(@[ + "persistence", + "--eth-private-key=" & unsafeKeyFile]) + node.waitUntilOutput("Ethereum private key file does not have safe file permissions") + node.stop() + discard removeFile(unsafeKeyFile) + + test "complains when persistence is enabled without accessible r1cs 
file": + let node = startNode(@["persistence", "prover"]) + node.waitUntilOutput("r1cs file not readable, doesn't exist or wrong extension (.r1cs)") + node.stop() + + test "complains when persistence is enabled without accessible wasm file": + let node = startNode(@[ + "persistence", + "prover", + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs" + ]) + node.waitUntilOutput("wasm file not readable, doesn't exist or wrong extension (.wasm)") + node.stop() + + test "complains when persistence is enabled without accessible zkey file": + let node = startNode(@[ + "persistence", + "prover", + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + "--circom-wasm=tests/circuits/fixtures/proof_main.wasm" + ]) + node.waitUntilOutput("zkey file not readable, doesn't exist or wrong extension (.zkey)") + node.stop() diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim new file mode 100644 index 00000000..1e078a41 --- /dev/null +++ b/tests/integration/testecbug.nim @@ -0,0 +1,60 @@ +from pkg/libp2p import Cid, init +import ../examples +import ./marketplacesuite +import ./nodeconfigs +import ./hardhatconfig + +marketplacesuite "Bug #821 - node crashes during erasure coding": + + test "should be able to create storage request and download dataset", + NodeConfigs( + clients: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output.debug() + .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + .withLogTopics("node", "erasure", "marketplace", ) + .some, + + providers: + CodexConfigs.init(nodes=0) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, + ): + let reward = 400.u256 + let duration = 10.periods + let collateral = 200.u256 + let expiry = 5.periods + let data = await RandomChunker.example(blocks=8) 
+ let client = clients()[0] + let clientApi = client.client + + let cid = clientApi.upload(data).get + + var requestId = none RequestId + proc onStorageRequested(event: StorageRequested) {.raises:[].} = + requestId = event.requestId.some + + let subscription = await marketplace.subscribe(StorageRequested, onStorageRequested) + + # client requests storage but requires multiple slots to host the content + let id = await clientApi.requestStorage( + cid, + duration=duration, + reward=reward, + expiry=expiry, + collateral=collateral, + nodes=3, + tolerance=1 + ) + + check eventually(requestId.isSome, timeout=expiry.int * 1000) + + let request = await marketplace.getRequest(requestId.get) + let cidFromRequest = Cid.init(request.content.cid).get() + let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + check downloaded.isOk + check downloaded.get.toHex == data.toHex + + await subscription.unsubscribe() diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim new file mode 100644 index 00000000..337d0847 --- /dev/null +++ b/tests/integration/testmarketplace.nim @@ -0,0 +1,165 @@ +import pkg/stew/byteutils +import pkg/codex/units +import ../examples +import ../contracts/time +import ../contracts/deployment +import ./marketplacesuite +import ./twonodes +import ./nodeconfigs + +twonodessuite "Marketplace", debug1 = false, debug2 = false: + setup: + # Our Hardhat configuration does use automine, which means that time tracked by `ethProvider.currentTime()` is not + # advanced until blocks are mined and that happens only when transaction is submitted. + # As we use in tests ethProvider.currentTime() which uses block timestamp this can lead to synchronization issues. 
+ await ethProvider.advanceTime(1.u256) + + test "nodes negotiate contracts on the marketplace": + let size = 0xFFFFFF.u256 + let data = await RandomChunker.example(blocks=8) + # client 2 makes storage available + let availability = client2.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + + # client 1 requests storage + let cid = client1.upload(data).get + let id = client1.requestStorage( + cid, + duration=10*60.u256, + reward=400.u256, + proofProbability=3.u256, + expiry=5*60, + collateral=200.u256, + nodes = 5, + tolerance = 2).get + + check eventually(client1.purchaseStateIs(id, "started"), timeout=5*60*1000) + let purchase = client1.getPurchase(id).get + check purchase.error == none string + let availabilities = client2.getAvailabilities().get + check availabilities.len == 1 + let newSize = availabilities[0].freeSize + check newSize > 0 and newSize < size + + let reservations = client2.getAvailabilityReservations(availability.id).get + check reservations.len == 5 + check reservations[0].requestId == purchase.requestId + + test "node slots gets paid out": + let size = 0xFFFFFF.u256 + let data = await RandomChunker.example(blocks = 8) + let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner()) + let tokenAddress = await marketplace.token() + let token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) + let reward = 400.u256 + let duration = 10*60.u256 + let nodes = 5'u + + # client 2 makes storage available + let startBalance = await token.balanceOf(account2) + discard client2.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + + # client 1 requests storage + let cid = client1.upload(data).get + let id = client1.requestStorage( + cid, + duration=duration, + reward=reward, + proofProbability=3.u256, + expiry=5*60, + collateral=200.u256, + nodes = nodes, + tolerance = 2).get + + check eventually(client1.purchaseStateIs(id, "started"), 
timeout=5*60*1000) + let purchase = client1.getPurchase(id).get + check purchase.error == none string + + # Proving mechanism uses blockchain clock to do proving/collect/cleanup round + # hence we must use `advanceTime` over `sleepAsync` as Hardhat does mine new blocks + # only with new transaction + await ethProvider.advanceTime(duration) + + check eventually (await token.balanceOf(account2)) - startBalance == duration*reward*nodes.u256 + +marketplacesuite "Marketplace payouts": + + test "expired request partially pays out for stored time", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + + clients: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output.debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "erasure") + .some, + + providers: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, + ): + let reward = 400.u256 + let duration = 10.periods + let collateral = 200.u256 + let expiry = 5.periods + let data = await RandomChunker.example(blocks=8) + let client = clients()[0] + let provider = providers()[0] + let clientApi = client.client + let providerApi = provider.client + let startBalanceProvider = await token.balanceOf(provider.ethAccount) + let startBalanceClient = await token.balanceOf(client.ethAccount) + + # provider makes storage available + discard providerApi.postAvailability( + # make availability size small enough that we can't fill all the slots, + # thus causing a cancellation + totalSize=(data.len div 2).u256, + duration=duration.u256, + minPrice=reward, + maxCollateral=collateral) + + let cid = clientApi.upload(data).get + + var slotIdxFilled = none 
UInt256 + proc onSlotFilled(event: SlotFilled) = + slotIdxFilled = some event.slotIndex + + let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled) + + # client requests storage but requires multiple slots to host the content + let id = await clientApi.requestStorage( + cid, + duration=duration, + reward=reward, + expiry=expiry, + collateral=collateral, + nodes=3, + tolerance=1 + ) + + # wait until one slot is filled + check eventually(slotIdxFilled.isSome, timeout=expiry.int * 1000) + + # wait until sale is cancelled + without requestId =? clientApi.requestId(id): + fail() + let slotId = slotId(requestId, !slotIdxFilled) + check eventually(providerApi.saleStateIs(slotId, "SaleCancelled"), timeout=expiry.int * 1000) + + check eventually ( + let endBalanceProvider = (await token.balanceOf(provider.ethAccount)); + endBalanceProvider > startBalanceProvider and + endBalanceProvider < startBalanceProvider + expiry.u256*reward + ) + check eventually ( + let endBalanceClient = (await token.balanceOf(client.ethAccount)); + let endBalanceProvider = (await token.balanceOf(provider.ethAccount)); + (startBalanceClient - endBalanceClient) == (endBalanceProvider - startBalanceProvider) + ) + + await subscription.unsubscribe() diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim new file mode 100644 index 00000000..057d7f46 --- /dev/null +++ b/tests/integration/testproofs.nim @@ -0,0 +1,310 @@ +from std/times import inMilliseconds +import pkg/codex/logutils +import pkg/stew/byteutils +import ../contracts/time +import ../contracts/deployment +import ../codex/helpers +import ../examples +import ./marketplacesuite +import ./nodeconfigs + +export logutils + +logScope: + topics = "integration test proofs" + + +marketplacesuite "Hosts submit regular proofs": + + test "hosts submit periodic proofs for slots they fill", NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: + 
HardhatConfig.none, + + clients: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node, marketplace") + .some, + + providers: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") + .some, + ): + let client0 = clients()[0].client + let expiry = 5.periods + let duration = expiry + 5.periods + + let data = await RandomChunker.example(blocks=8) + createAvailabilities(data.len * 2, duration) # TODO: better value for data.len + + let cid = client0.upload(data).get + + let purchaseId = await client0.requestStorage( + cid, + expiry=expiry, + duration=duration, + nodes=3, + tolerance=1 + ) + check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) + + var proofWasSubmitted = false + proc onProofSubmitted(event: ProofSubmitted) = + proofWasSubmitted = true + + let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted) + + check eventually(proofWasSubmitted, timeout=(duration - expiry).int * 1000) + + await subscription.unsubscribe() + + +marketplacesuite "Simulate invalid proofs": + + # TODO: these are very loose tests in that they are not testing EXACTLY how + # proofs were marked as missed by the validator. These tests should be + # tightened so that they are showing, as an integration test, that specific + # proofs are being marked as missed by the validator. 
+ + test "slot is freed after too many invalid proofs submitted", NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: + HardhatConfig.none, + + clients: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "clock") + .some, + + providers: + CodexConfigs.init(nodes=1) + .withSimulateProofFailures(idx=0, failEveryNProofs=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder") + .some, + + validators: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("validator", "onchain", "ethers", "clock") + .some + ): + let client0 = clients()[0].client + let expiry = 5.periods + let duration = expiry + 10.periods + + let data = await RandomChunker.example(blocks=8) + createAvailabilities(data.len * 2, duration) # TODO: better value for data.len + + let cid = client0.upload(data).get + + let purchaseId = await client0.requestStorage( + cid, + expiry=expiry, + duration=duration, + nodes=3, + tolerance=1, + proofProbability=1 + ) + let requestId = client0.requestId(purchaseId).get + + check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) + + var slotWasFreed = false + proc onSlotFreed(event: SlotFreed) = + if event.requestId == requestId: + slotWasFreed = true + + let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed) + + check eventually(slotWasFreed, timeout=(duration - expiry).int * 1000) + + await subscription.unsubscribe() + + test "slot is not freed when not enough invalid proofs submitted", 
NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + + clients: + CodexConfigs.init(nodes=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") + .some, + + providers: + CodexConfigs.init(nodes=1) + .withSimulateProofFailures(idx=0, failEveryNProofs=1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node") + .some, + + validators: + CodexConfigs.init(nodes=1) + # .debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("validator", "onchain", "ethers", "clock") + .some + ): + let client0 = clients()[0].client + let expiry = 5.periods + # In 2 periods you cannot have enough invalid proofs submitted: + let duration = expiry + 2.periods + + let data = await RandomChunker.example(blocks=8) + createAvailabilities(data.len * 2, duration) # TODO: better value for data.len + + let cid = client0.upload(data).get + + let purchaseId = await client0.requestStorage( + cid, + expiry=expiry, + duration=duration, + nodes=3, + tolerance=1, + proofProbability=1 + ) + let requestId = client0.requestId(purchaseId).get + + check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) + + var slotWasFreed = false + proc onSlotFreed(event: SlotFreed) = + if event.requestId == requestId: + slotWasFreed = true + + let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed) + + # check not freed + await sleepAsync((duration - expiry).int.seconds) + check not slotWasFreed + + await subscription.unsubscribe() + + # TODO: uncomment once fixed + # test "host that submits invalid proofs is paid out less", 
NodeConfigs( + # # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + # # hardhat: HardhatConfig().withLogFile(), + + # clients: + # CodexConfig() + # .nodes(1) + # # .debug() # uncomment to enable console log output.debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "erasure", "clock", "purchases"), + + # providers: + # CodexConfig() + # .nodes(3) + # .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2) + # # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node"), + + # validators: + # CodexConfig() + # .nodes(1) + # # .debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("validator") + # ): + # let client0 = clients()[0].client + # let provider0 = providers()[0] + # let provider1 = providers()[1] + # let provider2 = providers()[2] + # let totalPeriods = 25 + + # let datasetSizeInBlocks = 3 + # let data = await RandomChunker.example(blocks=datasetSizeInBlocks) + # # original data = 3 blocks so slot size will be 4 blocks + # let slotSize = (DefaultBlockSize * 4.NBytes).Natural.u256 + + # discard provider0.client.postAvailability( + # totalSize=slotSize, # should match 1 slot only + # duration=totalPeriods.periods.u256, + # minPrice=300.u256, + # maxCollateral=200.u256 + # ) + + # let cid = client0.upload(data).get + + # let purchaseId = await client0.requestStorage( + # cid, + # duration=totalPeriods.periods, + # expiry=10.periods, + # nodes=3, + # tolerance=1, + # origDatasetSizeInBlocks=datasetSizeInBlocks + # ) + + # without requestId =? 
client0.requestId(purchaseId): + # fail() + + # var filledSlotIds: seq[SlotId] = @[] + # proc onSlotFilled(event: SlotFilled) = + # let slotId = slotId(event.requestId, event.slotIndex) + # filledSlotIds.add slotId + + # let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled) + + # # wait til first slot is filled + # check eventually filledSlotIds.len > 0 + + # # now add availability for providers 1 and 2, which should allow them to to + # # put the remaining slots in their queues + # discard provider1.client.postAvailability( + # totalSize=slotSize, # should match 1 slot only + # duration=totalPeriods.periods.u256, + # minPrice=300.u256, + # maxCollateral=200.u256 + # ) + + # check eventually filledSlotIds.len > 1 + + # discard provider2.client.postAvailability( + # totalSize=slotSize, # should match 1 slot only + # duration=totalPeriods.periods.u256, + # minPrice=300.u256, + # maxCollateral=200.u256 + # ) + + # check eventually filledSlotIds.len > 2 + + # # Wait til second slot is filled. SaleFilled happens too quickly, check SaleProving instead. 
+ # check eventually provider1.client.saleStateIs(filledSlotIds[1], "SaleProving") + # check eventually provider2.client.saleStateIs(filledSlotIds[2], "SaleProving") + + # check eventually client0.purchaseStateIs(purchaseId, "started") + + # let currentPeriod = await getCurrentPeriod() + # check eventuallyP( + # # SaleFinished happens too quickly, check SalePayout instead + # provider0.client.saleStateIs(filledSlotIds[0], "SalePayout"), + # currentPeriod + totalPeriods.u256 + 1) + + # check eventuallyP( + # # SaleFinished happens too quickly, check SalePayout instead + # provider1.client.saleStateIs(filledSlotIds[1], "SalePayout"), + # currentPeriod + totalPeriods.u256 + 1) + + # check eventuallyP( + # # SaleFinished happens too quickly, check SalePayout instead + # provider2.client.saleStateIs(filledSlotIds[2], "SalePayout"), + # currentPeriod + totalPeriods.u256 + 1) + + # check eventually( + # (await token.balanceOf(provider1.ethAccount)) > + # (await token.balanceOf(provider0.ethAccount)) + # ) + + # await subscription.unsubscribe() diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim new file mode 100644 index 00000000..bc87f51b --- /dev/null +++ b/tests/integration/testpurchasing.nim @@ -0,0 +1,89 @@ +import std/options +import std/httpclient +import pkg/codex/rng +import ./twonodes +import ../contracts/time +import ../examples + +twonodessuite "Purchasing", debug1 = false, debug2 = false: + + test "node handles storage request": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get + let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get + check id1 != id2 + + test "node retrieves purchase status": + # get one contiguous chunk + let rng = rng.Rng.instance() + let chunker = 
RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2) + let data = await chunker.getBytes() + let cid = client1.upload(byteutils.toHex(data)).get + let id = client1.requestStorage( + cid, + duration=100.u256, + reward=2.u256, + proofProbability=3.u256, + expiry=30, + collateral=200.u256, + nodes=3, + tolerance=1).get + + let request = client1.getPurchase(id).get.request.get + check request.ask.duration == 100.u256 + check request.ask.reward == 2.u256 + check request.ask.proofProbability == 3.u256 + check request.expiry == 30 + check request.ask.collateral == 200.u256 + check request.ask.slots == 3'u64 + check request.ask.maxSlotLoss == 1'u64 + + # TODO: We currently do not support encoding single chunks + # test "node retrieves purchase status with 1 chunk": + # let cid = client1.upload("some file contents").get + # let id = client1.requestStorage(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, expiry=30, collateral=200.u256, nodes=2, tolerance=1).get + # let request = client1.getPurchase(id).get.request.get + # check request.ask.duration == 1.u256 + # check request.ask.reward == 2.u256 + # check request.ask.proofProbability == 3.u256 + # check request.expiry == 30 + # check request.ask.collateral == 200.u256 + # check request.ask.slots == 3'u64 + # check request.ask.maxSlotLoss == 1'u64 + + test "node remembers purchase status after restart": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + let id = client1.requestStorage(cid, + duration=100.u256, + reward=2.u256, + proofProbability=3.u256, + expiry=30, + collateral=200.u256).get + check eventually client1.purchaseStateIs(id, "submitted") + + node1.restart() + client1.restart() + + check eventually client1.purchaseStateIs(id, "submitted") + let request = client1.getPurchase(id).get.request.get + check request.ask.duration == 100.u256 + check request.ask.reward == 2.u256 + check request.ask.proofProbability == 3.u256 + check 
request.expiry == 30 + check request.ask.collateral == 200.u256 + check request.ask.slots == 2'u64 + check request.ask.maxSlotLoss == 0'u64 + + test "node requires expiry and its value to be in future": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + + let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256) + check responseMissing.status == "400 Bad Request" + check responseMissing.body == "Expiry required" + + let responseBefore = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=10) + check responseBefore.status == "400 Bad Request" + check "Expiry needs value bigger then zero and smaller then the request's duration" in responseBefore.body diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim new file mode 100644 index 00000000..1834dcf2 --- /dev/null +++ b/tests/integration/testrestapi.nim @@ -0,0 +1,130 @@ +import std/httpclient +import std/sequtils +from pkg/libp2p import `==` +import pkg/codex/units +import ./twonodes +import ../examples + +twonodessuite "REST API", debug1 = false, debug2 = false: + + test "nodes can print their peer information": + check !client1.info() != !client2.info() + + test "nodes can set chronicles log level": + client1.setLogLevel("DEBUG;TRACE:codex") + + test "node accepts file uploads": + let cid1 = client1.upload("some file contents").get + let cid2 = client1.upload("some other contents").get + check cid1 != cid2 + + test "node shows used and available space": + discard client1.upload("some file contents").get + discard client1.postAvailability(totalSize=12.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + let space = client1.space().tryGet() + check: + space.totalBlocks == 2 + space.quotaMaxBytes == 8589934592.NBytes + space.quotaUsedBytes == 65592.NBytes + space.quotaReservedBytes == 12.NBytes + + 
test "node lists local files": + let content1 = "some file contents" + let content2 = "some other contents" + + let cid1 = client1.upload(content1).get + let cid2 = client1.upload(content2).get + let list = client1.list().get + + check: + [cid1, cid2].allIt(it in list.content.mapIt(it.cid)) + + test "request storage fails for datasets that are too small": + let cid = client1.upload("some file contents").get + let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, nodes=2, collateral=200.u256, expiry=9) + + check: + response.status == "400 Bad Request" + response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes" + + test "request storage succeeds for sufficiently sized datasets": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9) + + check: + response.status == "200 OK" + + test "request storage fails if nodes and tolerance aren't correct": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + let duration = 100.u256 + let reward = 2.u256 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateral = 200.u256 + let ecParams = @[(1, 0), (1, 1), (2, 1), (3, 2), (3, 3)] + + for ecParam in ecParams: + let (nodes, tolerance) = ecParam + + var responseBefore = client1.requestStorageRaw(cid, + duration, + reward, + proofProbability, + collateral, + expiry, + nodes.uint, + tolerance.uint) + + check responseBefore.status == "400 Bad Request" + check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" + + test "request storage fails if tolerance > nodes (underflow protection)": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + let duration = 100.u256 + let reward 
= 2.u256 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateral = 200.u256 + let ecParams = @[(0, 1), (1, 2), (2, 3)] + + for ecParam in ecParams: + let (nodes, tolerance) = ecParam + + var responseBefore = client1.requestStorageRaw(cid, + duration, + reward, + proofProbability, + collateral, + expiry, + nodes.uint, + tolerance.uint) + + check responseBefore.status == "400 Bad Request" + check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`" + + test "request storage succeeds if nodes and tolerance within range": + let data = await RandomChunker.example(blocks=2) + let cid = client1.upload(data).get + let duration = 100.u256 + let reward = 2.u256 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateral = 200.u256 + let ecParams = @[(2, 0), (3, 1), (5, 2)] + + for ecParam in ecParams: + let (nodes, tolerance) = ecParam + + var responseBefore = client1.requestStorageRaw(cid, + duration, + reward, + proofProbability, + collateral, + expiry, + nodes.uint, + tolerance.uint) + + check responseBefore.status == "200 OK" diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim new file mode 100644 index 00000000..2a57d0f0 --- /dev/null +++ b/tests/integration/testsales.nim @@ -0,0 +1,83 @@ +import std/httpclient +import pkg/codex/contracts +import ./twonodes +import ../codex/examples +import ../contracts/time + +proc findItem[T](items: seq[T], item: T): ?!T = + for tmp in items: + if tmp == item: + return success tmp + + return failure("Not found") + +twonodessuite "Sales", debug1 = false, debug2 = false: + + test "node handles new storage availability": + let availability1 = client1.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + let availability2 = client1.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get + check availability1 != availability2 + + test "node lists storage that is for 
sale": + let availability = client1.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + check availability in client1.getAvailabilities().get + + test "updating non-existing availability": + let nonExistingResponse = client1.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some) + check nonExistingResponse.status == "404 Not Found" + + test "updating availability": + let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get + + client1.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some) + + let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get + check updatedAvailability.duration == 100 + check updatedAvailability.minPrice == 200 + check updatedAvailability.maxCollateral == 200 + check updatedAvailability.totalSize == 140000 + check updatedAvailability.freeSize == 140000 + + test "updating availability - freeSize is not allowed to be changed": + let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get + let freeSizeResponse = client1.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some) + check freeSizeResponse.status == "400 Bad Request" + check "not allowed" in freeSizeResponse.body + + test "updating availability - updating totalSize": + let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get + client1.patchAvailability(availability.id, totalSize=100000.u256.some) + let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get + check updatedAvailability.totalSize == 100000 + check updatedAvailability.freeSize == 100000 + + test "updating availability - updating totalSize does not allow bellow 
utilized": + let originalSize = 0xFFFFFF.u256 + let data = await RandomChunker.example(blocks=8) + let availability = client1.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + + # Lets create storage request that will utilize some of the availability's space + let cid = client2.upload(data).get + let id = client2.requestStorage( + cid, + duration=10*60.u256, + reward=400.u256, + proofProbability=3.u256, + expiry=5*60, + collateral=200.u256, + nodes = 5, + tolerance = 2).get + + check eventually(client2.purchaseStateIs(id, "started"), timeout=5*60*1000) + let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get + check updatedAvailability.totalSize != updatedAvailability.freeSize + + let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize + let totalSizeResponse = client1.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some) + check totalSizeResponse.status == "400 Bad Request" + check "totalSize must be larger then current totalSize" in totalSizeResponse.body + + client1.patchAvailability(availability.id, totalSize=(originalSize + 20000).some) + let newUpdatedAvailability = (client1.getAvailabilities().get).findItem(availability).get + check newUpdatedAvailability.totalSize == originalSize + 20000 + check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 diff --git a/tests/integration/testupdownload.nim b/tests/integration/testupdownload.nim new file mode 100644 index 00000000..33e3dfe2 --- /dev/null +++ b/tests/integration/testupdownload.nim @@ -0,0 +1,39 @@ +import ./twonodes + +twonodessuite "Uploads and downloads", debug1 = false, debug2 = false: + + test "node allows local file downloads": + let content1 = "some file contents" + let content2 = "some other contents" + + let cid1 = client1.upload(content1).get + let cid2 = client2.upload(content2).get + + let resp1 = client1.download(cid1, local = 
true).get + let resp2 = client2.download(cid2, local = true).get + + check: + content1 == resp1 + content2 == resp2 + + test "node allows remote file downloads": + let content1 = "some file contents" + let content2 = "some other contents" + + let cid1 = client1.upload(content1).get + let cid2 = client2.upload(content2).get + + let resp2 = client1.download(cid2, local = false).get + let resp1 = client2.download(cid1, local = false).get + + check: + content1 == resp1 + content2 == resp2 + + test "node fails retrieving non-existing local file": + let content1 = "some file contents" + let cid1 = client1.upload(content1).get # upload to first node + let resp2 = client2.download(cid1, local = true) # try retrieving from second node + + check: + resp2.error.msg == "404 Not Found" diff --git a/tests/integration/tokens.nim b/tests/integration/tokens.nim deleted file mode 100644 index 8c1a00d7..00000000 --- a/tests/integration/tokens.nim +++ /dev/null @@ -1,15 +0,0 @@ -import codex/contracts -import codex/contracts/testtoken - -proc mint*(signer: Signer, amount = 1_000_000.u256) {.async.} = - ## Mints a considerable amount of tokens and approves them for transfer to - ## the Storage contract. - let token = TestToken.new(!deployment().address(TestToken), signer) - let storage = Storage.new(!deployment().address(Storage), signer) - await token.mint(await signer.getAddress(), amount) - await token.approve(storage.address, amount) - -proc deposit*(signer: Signer) {.async.} = - ## Deposits sufficient collateral into the Storage contract. 
- let storage = Storage.new(!deployment().address(Storage), signer) - await storage.deposit(await storage.collateralAmount()) diff --git a/tests/integration/twonodes.nim b/tests/integration/twonodes.nim new file mode 100644 index 00000000..abf20c57 --- /dev/null +++ b/tests/integration/twonodes.nim @@ -0,0 +1,92 @@ +import std/os +import std/macros +import std/httpclient +import ../ethertest +import ./codexclient +import ./nodes + +export ethertest +export codexclient +export nodes + +template twonodessuite*(name: string, debug1, debug2: bool | string, body) = + twonodessuite(name, $debug1, $debug2, body) + +template twonodessuite*(name: string, debug1, debug2: string, body) = + ethersuite name: + + var node1 {.inject, used.}: NodeProcess + var node2 {.inject, used.}: NodeProcess + var client1 {.inject, used.}: CodexClient + var client2 {.inject, used.}: CodexClient + var account1 {.inject, used.}: Address + var account2 {.inject, used.}: Address + + let dataDir1 = getTempDir() / "Codex1" + let dataDir2 = getTempDir() / "Codex2" + + setup: + client1 = CodexClient.new("http://localhost:8080/api/codex/v1") + client2 = CodexClient.new("http://localhost:8081/api/codex/v1") + account1 = accounts[0] + account2 = accounts[1] + + var node1Args = @[ + "--api-port=8080", + "--data-dir=" & dataDir1, + "--nat=127.0.0.1", + "--disc-ip=127.0.0.1", + "--disc-port=8090", + "--listen-addrs=/ip4/127.0.0.1/tcp/0", + "persistence", + "prover", + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + "--circom-wasm=tests/circuits/fixtures/proof_main.wasm", + "--circom-zkey=tests/circuits/fixtures/proof_main.zkey", + "--eth-account=" & $account1 + ] + + if debug1 != "true" and debug1 != "false": + node1Args.add("--log-level=" & debug1) + + node1 = startNode(node1Args, debug = debug1) + node1.waitUntilStarted() + + let bootstrap = (!client1.info()["spr"]).getStr() + + var node2Args = @[ + "--api-port=8081", + "--data-dir=" & dataDir2, + "--nat=127.0.0.1", + "--disc-ip=127.0.0.1", + 
"--disc-port=8091", + "--listen-addrs=/ip4/127.0.0.1/tcp/0", + "--bootstrap-node=" & bootstrap, + "persistence", + "prover", + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + "--circom-wasm=tests/circuits/fixtures/proof_main.wasm", + "--circom-zkey=tests/circuits/fixtures/proof_main.zkey", + "--eth-account=" & $account2 + ] + + if debug2 != "true" and debug2 != "false": + node2Args.add("--log-level=" & debug2) + + node2 = startNode(node2Args, debug = debug2) + node2.waitUntilStarted() + + # ensure that we have a recent block with a fresh timestamp + discard await send(ethProvider, "evm_mine") + + teardown: + client1.close() + client2.close() + + node1.stop() + node2.stop() + + removeDir(dataDir1) + removeDir(dataDir2) + + body diff --git a/tests/logging.nim b/tests/logging.nim new file mode 100644 index 00000000..ece9c9b0 --- /dev/null +++ b/tests/logging.nim @@ -0,0 +1,10 @@ +when not defined(nimscript): + import pkg/codex/logutils + + proc ignoreLogging(level: LogLevel, message: LogOutputStr) = + discard + + defaultChroniclesStream.output.writer = ignoreLogging + + {.warning[UnusedImport]:off.} + {.used.} diff --git a/tests/nim.cfg b/tests/nim.cfg deleted file mode 100644 index 11b328d0..00000000 --- a/tests/nim.cfg +++ /dev/null @@ -1,4 +0,0 @@ ---path:".." 
---threads:on ---tlsEmulation:off --d:chronicles_enabled=off diff --git a/tests/nimlldb.py b/tests/nimlldb.py new file mode 100644 index 00000000..bcc96e0d --- /dev/null +++ b/tests/nimlldb.py @@ -0,0 +1,1381 @@ +import lldb +from collections import OrderedDict +from typing import Union + + +def sbvaluegetitem(self: lldb.SBValue, name: Union[int, str]) -> lldb.SBValue: + if isinstance(name, str): + return self.GetChildMemberWithName(name) + else: + return self.GetChildAtIndex(name) + + +# Make this easier to work with +lldb.SBValue.__getitem__ = sbvaluegetitem + +NIM_IS_V2 = True + + +def get_nti(value: lldb.SBValue, nim_name=None): + name_split = value.type.name.split("_") + type_nim_name = nim_name or name_split[1] + id_string = name_split[-1].split(" ")[0] + + type_info_name = "NTI" + type_nim_name.lower() + "__" + id_string + "_" + nti = value.target.FindFirstGlobalVariable(type_info_name) + if not nti.IsValid(): + type_info_name = "NTI" + "__" + id_string + "_" + nti = value.target.FindFirstGlobalVariable(type_info_name) + if not nti.IsValid(): + print(f"NimEnumPrinter: lookup global symbol: '{type_info_name}' failed for {value.type.name}.\n") + return type_nim_name, nti + + +def enum_to_string(value: lldb.SBValue, int_val=None, nim_name=None): + tname = nim_name or value.type.name.split("_")[1] + + enum_val = value.signed + if int_val is not None: + enum_val = int_val + + default_val = f"{tname}.{str(enum_val)}" + + fn_syms = value.target.FindFunctions("reprEnum") + if not fn_syms.GetSize() > 0: + return default_val + + fn_sym: lldb.SBSymbolContext = fn_syms.GetContextAtIndex(0) + + fn: lldb.SBFunction = fn_sym.function + + fn_type: lldb.SBType = fn.type + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + if arg_types.GetSize() < 2: + return default_val + + arg1_type: lldb.SBType = arg_types.GetTypeAtIndex(0) + arg2_type: lldb.SBType = arg_types.GetTypeAtIndex(1) + + ty_info_name, nti = get_nti(value, nim_name=tname) + + if not nti.IsValid(): 
+ return default_val + + call = f"{fn.name}(({arg1_type.name}){enum_val}, ({arg2_type.name})" + str(nti.GetLoadAddress()) + ");" + + res = executeCommand(call) + + if res.error.fail: + return default_val + + return f"{tname}.{res.summary[1:-1]}" + + +def to_string(value: lldb.SBValue): + # For getting NimStringDesc * value + value = value.GetNonSyntheticValue() + + # Check if data pointer is Null + if value.type.is_pointer and value.unsigned == 0: + return None + + size = int(value["Sup"]["len"].unsigned) + + if size == 0: + return "" + + if size > 2**14: + return "... (too long) ..." + + data = value["data"] + + # Check if first element is NULL + base_data_type = value.target.FindFirstType("char") + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return None + + cast = data.Cast(value.target.FindFirstType("char").GetArrayType(size)) + return bytearray(cast.data.uint8s).decode("utf-8") + + +def to_stringV2(value: lldb.SBValue): + # For getting NimStringV2 value + value = value.GetNonSyntheticValue() + + data = value["p"]["data"] + + # Check if data pointer is Null + if value["p"].unsigned == 0: + return None + + size = int(value["len"].signed) + + if size == 0: + return "" + + if size > 2**14: + return "... (too long) ..." 
+ + # Check if first element is NULL + base_data_type = data.type.GetArrayElementType().GetTypedefedType() + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return None + + cast = data.Cast(base_data_type.GetArrayType(size)) + return bytearray(cast.data.uint8s).decode("utf-8") + + +def NimString(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + if NIM_IS_V2: + res = to_stringV2(value) + else: + res = to_string(value) + + if res is not None: + return f'"{res}"' + else: + return "nil" + + +def rope_helper(value: lldb.SBValue) -> str: + value = value.GetNonSyntheticValue() + if value.type.is_pointer and value.unsigned == 0: + return "" + + if value["length"].unsigned == 0: + return "" + + if NIM_IS_V2: + str_val = to_stringV2(value["data"]) + else: + str_val = to_string(value["data"]) + + if str_val is None: + str_val = "" + + return rope_helper(value["left"]) + str_val + rope_helper(value["right"]) + + +def Rope(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + rope_str = rope_helper(value) + + if len(rope_str) == 0: + rope_str = "nil" + else: + rope_str = f'"{rope_str}"' + + return f"Rope({rope_str})" + + +def NCSTRING(value: lldb.SBValue, internal_dict=None): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + ty = value.Dereference().type + val = value.target.CreateValueFromAddress( + value.name or "temp", lldb.SBAddress(value.unsigned, value.target), ty + ).AddressOf() + return val.summary + + +def ObjectV2(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + orig_value = value.GetNonSyntheticValue() + if orig_value.type.is_pointer and 
orig_value.unsigned == 0: + return "nil" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + while orig_value.type.is_pointer: + orig_value = orig_value.Dereference() + + if "_" in orig_value.type.name: + obj_name = orig_value.type.name.split("_")[1].replace("colonObjectType", "") + else: + obj_name = orig_value.type.name + + num_children = value.num_children + fields = [] + + for i in range(num_children): + fields.append(f"{value[i].name}: {value[i].summary}") + + res = f"{obj_name}(" + ", ".join(fields) + ")" + return res + + +def Number(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + if value.type.is_pointer and value.signed == 0: + return "nil" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.signed) + + +def Float(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.value) + + +def UnsignedNumber(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.unsigned) + + +def Bool(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.value) + + +def CharArray(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str([f"'{char}'" for char in value.uint8s]) + + +def Array(value: 
lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + value = value.GetNonSyntheticValue() + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + value = value.GetNonSyntheticValue() + return "[" + ", ".join([value[i].summary for i in range(value.num_children)]) + "]" + + +def Tuple(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + while value.type.is_pointer: + value = value.Dereference() + + num_children = value.num_children + + fields = [] + + for i in range(num_children): + key = value[i].name + val = value[i].summary + if key.startswith("Field"): + fields.append(f"{val}") + else: + fields.append(f"{key}: {val}") + + return "(" + ", ".join(fields) + f")" + + +def is_local(value: lldb.SBValue) -> bool: + line: lldb.SBLineEntry = value.frame.GetLineEntry() + decl: lldb.SBDeclaration = value.GetDeclaration() + + if line.file == decl.file and decl.line != 0: + return True + + return False + + +def is_in_scope(value: lldb.SBValue) -> bool: + line: lldb.SBLineEntry = value.frame.GetLineEntry() + decl: lldb.SBDeclaration = value.GetDeclaration() + + if is_local(value) and decl.line < line.line: + return True + + return False + + +def Enum(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_value_summary(value) + if custom_summary is not None: + return custom_summary + + return enum_to_string(value) + + +def EnumSet(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + vals = [] + max_vals = 7 + for child in value.children: + vals.append(child.summary) + if 
len(vals) > max_vals: + vals.append("...") + break + + return "{" + ", ".join(vals) + "}" + + +def Set(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + vals = [] + max_vals = 7 + for child in value.children: + vals.append(child.value) + if len(vals) > max_vals: + vals.append("...") + break + + return "{" + ", ".join(vals) + "}" + + +def Table(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + fields = [] + + for i in range(value.num_children): + key = value[i].name + val = value[i].summary + fields.append(f"{key}: {val}") + + return "Table({" + ", ".join(fields) + "})" + + +def HashSet(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + fields = [] + + for i in range(value.num_children): + fields.append(f"{value[i].summary}") + + return "HashSet({" + ", ".join(fields) + "})" + + +def StringTable(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + fields = [] + + for i in range(value.num_children - 1): + key = value[i].name + val = value[i].summary + fields.append(f"{key}: {val}") + + mode = value[value.num_children - 1].summary + + return "StringTable({" + ", ".join(fields) + f"}}, mode={mode})" + + +def Sequence(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return 
custom_summary + + return "@[" + ", ".join([value[i].summary for i in range(value.num_children)]) + "]" + + +class StringChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + if not NIM_IS_V2: + self.data_type = self.value.target.FindFirstType("char") + + self.first_element: lldb.SBValue + self.update() + self.count = 0 + + def num_children(self): + return self.count + + def get_child_index(self, name): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + offset = index * self.data_size + return self.first_element.CreateChildAtOffset("[" + str(index) + "]", offset, self.data_type) + + def get_data(self) -> lldb.SBValue: + return self.value["p"]["data"] if NIM_IS_V2 else self.value["data"] + + def get_len(self) -> int: + if NIM_IS_V2: + if self.value["p"].unsigned == 0: + return 0 + + size = int(self.value["len"].signed) + + if size == 0: + return 0 + + data = self.value["p"]["data"] + + # Check if first element is NULL + base_data_type = data.type.GetArrayElementType().GetTypedefedType() + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return 0 + else: + if self.value.type.is_pointer and self.value.unsigned == 0: + return 0 + + size = int(self.value["Sup"]["len"].unsigned) + + if size == 0: + return 0 + + data = self.value["data"] + + # Check if first element is NULL + base_data_type = self.value.target.FindFirstType("char") + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return 0 + + return size + + def update(self): + if is_local(self.value): + if not is_in_scope(self.value): + return + + data = self.get_data() + size = self.get_len() + + self.count = size + self.first_element = data + + if NIM_IS_V2: + self.data_type = data.type.GetArrayElementType().GetTypedefedType() + + self.data_size = self.data_type.GetByteSize() + + def has_children(self): + return bool(self.num_children()) + + +class ArrayChildrenProvider: + def 
__init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + self.first_element: lldb.SBValue + self.update() + + def num_children(self): + return self.has_children() and self.value.num_children + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + offset = index * self.value[index].GetByteSize() + return self.first_element.CreateChildAtOffset("[" + str(index) + "]", offset, self.data_type) + + def update(self): + if not self.has_children(): + return + + self.first_element = self.value[0] + self.data_type = self.value.type.GetArrayElementType() + + def has_children(self): + if is_local(self.value): + if not is_in_scope(self.value): + return False + return bool(self.value.num_children) + + +class SeqChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + self.first_element: lldb.SBValue + self.data: lldb.SBValue + self.count = 0 + self.update() + + def num_children(self): + return self.count + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + offset = index * self.data[index].GetByteSize() + return self.first_element.CreateChildAtOffset("[" + str(index) + "]", offset, self.data_type) + + def get_data(self) -> lldb.SBValue: + return self.value["p"]["data"] if NIM_IS_V2 else self.value["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["len"] if NIM_IS_V2 else self.value["Sup"]["len"] + + def update(self): + self.count = 0 + + if is_local(self.value): + if not is_in_scope(self.value): + return + + self.count = self.get_len().unsigned + + if not self.has_children(): + return + + data = self.get_data() + self.data_type = data.type.GetArrayElementType() + + self.data = data.Cast(self.data_type.GetArrayType(self.num_children())) + self.first_element = self.data + + def has_children(self): + 
return bool(self.num_children()) + + +class ObjectChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + self.first_element: lldb.SBValue + self.data: lldb.SBValue + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.children) + + def get_child_index(self, name: str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def populate_children(self): + self.children.clear() + self.child_list = [] + + if is_local(self.value): + if not is_in_scope(self.value): + return + + stack = [self.value.GetNonSyntheticValue()] + + index = 0 + + while stack: + cur_val = stack.pop() + if cur_val.type.is_pointer and cur_val.unsigned == 0: + continue + + while cur_val.type.is_pointer: + cur_val = cur_val.Dereference() + + # Add super objects if they exist + if cur_val.num_children > 0 and cur_val[0].name == "Sup" and cur_val[0].type.name.startswith("tyObject"): + stack.append(cur_val[0]) + + for child in cur_val.children: + child = child.GetNonSyntheticValue() + if child.name == "Sup": + continue + self.children[child.name] = index + self.child_list.append(child) + index += 1 + + def update(self): + self.populate_children() + + def has_children(self): + return bool(self.num_children()) + + +class HashSetChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def get_data(self) -> lldb.SBValue: + return self.value["data"]["p"]["data"] if NIM_IS_V2 else self.value["data"]["data"] + + def get_len(self) -> lldb.SBValue: + return 
self.value["data"]["len"] if NIM_IS_V2 else self.value["data"]["Sup"]["len"] + + def update(self): + self.child_list = [] + + if is_local(self.value): + if not is_in_scope(self.value): + return + + tuple_len = int(self.get_len().unsigned) + tuple = self.get_data() + + base_data_type = tuple.type.GetArrayElementType() + + cast = tuple.Cast(base_data_type.GetArrayType(tuple_len)) + + index = 0 + for i in range(tuple_len): + el = cast[i] + field0 = int(el[0].unsigned) + if field0 == 0: + continue + key = el[1] + child = key.CreateValueFromAddress(f"[{str(index)}]", key.GetLoadAddress(), key.GetType()) + index += 1 + + self.child_list.append(child) + + def has_children(self): + return bool(self.num_children()) + + +class SetCharChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType("char") + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + + cur_pos = 0 + for child in self.value.children: + child_val = child.signed + if child_val != 0: + temp = child_val + num_bits = 8 + while temp != 0: + is_set = temp & 1 + if is_set == 1: + data = lldb.SBData.CreateDataFromInt(cur_pos) + child = self.value.synthetic_child_from_data(f"[{len(self.child_list)}]", data, self.ty) + self.child_list.append(child) + temp = temp >> 1 + cur_pos += 1 + num_bits -= 1 + cur_pos += num_bits + else: + cur_pos += 8 + + def has_children(self): + return bool(self.num_children()) + + +def create_set_children(value: lldb.SBValue, child_type: lldb.SBType, starting_pos: int) -> list[lldb.SBValue]: + child_list: list[lldb.SBValue] = [] + cur_pos = starting_pos + + if value.num_children > 
0: + children = value.children + else: + children = [value] + + for child in children: + child_val = child.signed + if child_val != 0: + temp = child_val + num_bits = 8 + while temp != 0: + is_set = temp & 1 + if is_set == 1: + data = lldb.SBData.CreateDataFromInt(cur_pos) + child = value.synthetic_child_from_data(f"[{len(child_list)}]", data, child_type) + child_list.append(child) + temp = temp >> 1 + cur_pos += 1 + num_bits -= 1 + cur_pos += num_bits + else: + cur_pos += 8 + + return child_list + + +class SetIntChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType(f"NI64") + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + bits = self.value.GetByteSize() * 8 + starting_pos = -(bits // 2) + self.child_list = create_set_children(self.value, self.ty, starting_pos) + + def has_children(self): + return bool(self.num_children()) + + +class SetUIntChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType(f"NU64") + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + self.child_list = create_set_children(self.value, self.ty, starting_pos=0) + + def has_children(self): + return bool(self.num_children()) + + +class SetEnumChildrenProvider: + 
def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType(self.value.type.name.replace("tySet_", "")) + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + if is_local(self.value): + if not is_in_scope(self.value): + return + self.child_list = create_set_children(self.value, self.ty, starting_pos=0) + + def has_children(self): + return bool(self.num_children()) + + +class TableChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def get_data(self) -> lldb.SBValue: + return self.value["data"]["p"]["data"] if NIM_IS_V2 else self.value["data"]["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["data"]["len"] if NIM_IS_V2 else self.value["data"]["Sup"]["len"] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + + tuple_len = int(self.get_len().unsigned) + tuple = self.get_data() + + base_data_type = tuple.type.GetArrayElementType() + + cast = tuple.Cast(base_data_type.GetArrayType(tuple_len)) + + index = 0 + for i in range(tuple_len): + el = cast[i] + field0 = int(el[0].unsigned) + if field0 == 0: + continue + key = el[1] + val = el[2] + key_summary = key.summary + child = self.value.CreateValueFromAddress(key_summary, val.GetLoadAddress(), val.GetType()) + self.child_list.append(child) + self.children[key_summary] = index + index 
+= 1 + + def has_children(self): + return bool(self.num_children()) + + +class StringTableChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def get_data(self) -> lldb.SBValue: + return self.value["data"]["p"]["data"] if NIM_IS_V2 else self.value["data"]["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["data"]["len"] if NIM_IS_V2 else self.value["data"]["Sup"]["len"] + + def update(self): + self.children.clear() + self.child_list = [] + + if is_local(self.value): + if not is_in_scope(self.value): + return + + tuple_len = int(self.get_len().unsigned) + tuple = self.get_data() + + base_data_type = tuple.type.GetArrayElementType() + + cast = tuple.Cast(base_data_type.GetArrayType(tuple_len)) + + index = 0 + for i in range(tuple_len): + el = cast[i] + field0 = int(el[2].unsigned) + if field0 == 0: + continue + key = el[0] + val = el[1] + child = val.CreateValueFromAddress(key.summary, val.GetLoadAddress(), val.GetType()) + self.child_list.append(child) + self.children[key.summary] = index + index += 1 + + self.child_list.append(self.value["mode"]) + self.children["mode"] = index + + def has_children(self): + return bool(self.num_children()) + + +class LLDBDynamicObjectProvider: + def __init__(self, value: lldb.SBValue, internalDict): + value = value.GetNonSyntheticValue() + self.value: lldb.SBValue = value[0] + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + + while self.value.type.is_pointer: + self.value = self.value.Dereference() + + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: 
str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.children.clear() + self.child_list = [] + + for i, child in enumerate(self.value.children): + name = child.name.strip('"') + new_child = child.CreateValueFromAddress(name, child.GetLoadAddress(), child.GetType()) + + self.children[name] = i + self.child_list.append(new_child) + + def has_children(self): + return bool(self.num_children()) + + +class LLDBBasicObjectProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value: lldb.SBValue = value + + def num_children(self): + if self.value is not None: + return self.value.num_children + return 0 + + def get_child_index(self, name: str): + return self.value.GetIndexOfChildWithName(name) + + def get_child_at_index(self, index): + return self.value.GetChildAtIndex(index) + + def update(self): + pass + + def has_children(self): + return self.num_children() > 0 + + +class CustomObjectChildrenProvider: + """ + This children provider handles values returned from lldbDebugSynthetic* + Nim procedures + """ + + def __init__(self, value: lldb.SBValue, internalDict): + self.value: lldb.SBValue = get_custom_synthetic(value) or value + if "lldbdynamicobject" in self.value.type.name.lower(): + self.provider = LLDBDynamicObjectProvider(self.value, internalDict) + else: + self.provider = LLDBBasicObjectProvider(self.value, internalDict) + + def num_children(self): + return self.provider.num_children() + + def get_child_index(self, name: str): + return self.provider.get_child_index(name) + + def get_child_at_index(self, index): + return self.provider.get_child_at_index(index) + + def update(self): + self.provider.update() + + def has_children(self): + return self.provider.has_children() + + +def echo(debugger: lldb.SBDebugger, command: str, result, internal_dict): + debugger.HandleCommand("po " + command) + + +SUMMARY_FUNCTIONS: dict[str, lldb.SBFunction] = {} +SYNTHETIC_FUNCTIONS: 
dict[str, lldb.SBFunction] = {} + + +def get_custom_summary(value: lldb.SBValue) -> Union[str, None]: + """Get a custom summary if a function exists for it""" + value = value.GetNonSyntheticValue() + if value.GetAddress().GetOffset() == 0: + return None + + base_type = get_base_type(value.type) + + fn = SUMMARY_FUNCTIONS.get(base_type.name) + if fn is None: + return None + + fn_type: lldb.SBType = fn.type + + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + first_type = arg_types.GetTypeAtIndex(0) + + while value.type.is_pointer: + value = value.Dereference() + + if first_type.is_pointer: + command = f"{fn.name}(({first_type.name})" + str(value.GetLoadAddress()) + ");" + else: + command = f"{fn.name}(*({first_type.GetPointerType().name})" + str(value.GetLoadAddress()) + ");" + + res = executeCommand(command) + + if res.error.fail: + return None + + return res.summary.strip('"') + + +def get_custom_value_summary(value: lldb.SBValue) -> Union[str, None]: + """Get a custom summary if a function exists for it""" + + fn: lldb.SBFunction = SUMMARY_FUNCTIONS.get(value.type.name) + if fn is None: + return None + + command = f"{fn.name}(({value.type.name})" + str(value.signed) + ");" + res = executeCommand(command) + + if res.error.fail: + return None + + return res.summary.strip('"') + + +def get_custom_synthetic(value: lldb.SBValue) -> Union[lldb.SBValue, None]: + """Get a custom synthetic object if a function exists for it""" + value = value.GetNonSyntheticValue() + if value.GetAddress().GetOffset() == 0: + return None + + base_type = get_base_type(value.type) + + fn = SYNTHETIC_FUNCTIONS.get(base_type.name) + if fn is None: + return None + + fn_type: lldb.SBType = fn.type + + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + first_type = arg_types.GetTypeAtIndex(0) + + while value.type.is_pointer: + value = value.Dereference() + + if first_type.is_pointer: + first_arg = f"({first_type.name}){value.GetLoadAddress()}" + else: + first_arg = 
f"*({first_type.GetPointerType().name}){value.GetLoadAddress()}" + + if arg_types.GetSize() > 1 and fn.GetArgumentName(1) == "Result": + ret_type = arg_types.GetTypeAtIndex(1) + ret_type = get_base_type(ret_type) + + command = f""" + {ret_type.name} lldbT; + nimZeroMem((void*)(&lldbT), sizeof({ret_type.name})); + {fn.name}(({first_arg}), (&lldbT)); + lldbT; + """ + else: + command = f"{fn.name}({first_arg});" + + res = executeCommand(command) + + if res.error.fail: + print(res.error) + return None + + return res + + +def get_base_type(ty: lldb.SBType) -> lldb.SBType: + """Get the base type of the type""" + temp = ty + while temp.IsPointerType(): + temp = temp.GetPointeeType() + return temp + + +def use_base_type(ty: lldb.SBType) -> bool: + types_to_check = [ + "NF", + "NF32", + "NF64", + "NI", + "NI8", + "NI16", + "NI32", + "NI64", + "bool", + "NIM_BOOL", + "NU", + "NU8", + "NU16", + "NU32", + "NU64", + ] + + for type_to_check in types_to_check: + if ty.name.startswith(type_to_check): + return False + + return True + + +def breakpoint_function_wrapper(frame: lldb.SBFrame, bp_loc, internal_dict): + """This allows function calls to Nim for custom object summaries and synthetic children""" + debugger = lldb.debugger + + global SUMMARY_FUNCTIONS + global SYNTHETIC_FUNCTIONS + + global NIM_IS_V2 + + for tname, fn in SYNTHETIC_FUNCTIONS.items(): + debugger.HandleCommand(f"type synthetic delete -w nim {tname}") + + SUMMARY_FUNCTIONS = {} + SYNTHETIC_FUNCTIONS = {} + + target: lldb.SBTarget = debugger.GetSelectedTarget() + + NIM_IS_V2 = target.FindFirstType("TNimTypeV2").IsValid() + + module = frame.GetSymbolContext(lldb.eSymbolContextModule).module + + for sym in module: + if ( + not sym.name.startswith("lldbDebugSummary") + and not sym.name.startswith("lldbDebugSynthetic") + and not sym.name.startswith("dollar___") + ): + continue + + fn_syms: lldb.SBSymbolContextList = target.FindFunctions(sym.name) + if not fn_syms.GetSize() > 0: + continue + + fn_sym: 
lldb.SBSymbolContext = fn_syms.GetContextAtIndex(0) + + fn: lldb.SBFunction = fn_sym.function + fn_type: lldb.SBType = fn.type + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + + if arg_types.GetSize() > 1 and fn.GetArgumentName(1) == "Result": + pass # don't continue + elif arg_types.GetSize() != 1: + continue + + arg_type: lldb.SBType = arg_types.GetTypeAtIndex(0) + if use_base_type(arg_type): + arg_type = get_base_type(arg_type) + + if sym.name.startswith("lldbDebugSummary") or sym.name.startswith("dollar___"): + SUMMARY_FUNCTIONS[arg_type.name] = fn + elif sym.name.startswith("lldbDebugSynthetic"): + SYNTHETIC_FUNCTIONS[arg_type.name] = fn + debugger.HandleCommand( + f"type synthetic add -w nim -l {__name__}.CustomObjectChildrenProvider {arg_type.name}" + ) + + +def executeCommand(command, *args): + debugger = lldb.debugger + process = debugger.GetSelectedTarget().GetProcess() + frame: lldb.SBFrame = process.GetSelectedThread().GetSelectedFrame() + + expr_options = lldb.SBExpressionOptions() + expr_options.SetIgnoreBreakpoints(False) + expr_options.SetFetchDynamicValue(lldb.eDynamicCanRunTarget) + expr_options.SetTimeoutInMicroSeconds(30 * 1000 * 1000) # 30 second timeout + expr_options.SetTryAllThreads(True) + expr_options.SetUnwindOnError(False) + expr_options.SetGenerateDebugInfo(True) + expr_options.SetLanguage(lldb.eLanguageTypeC) + expr_options.SetCoerceResultToId(True) + res = frame.EvaluateExpression(command, expr_options) + + return res + + +def __lldb_init_module(debugger, internal_dict): + # fmt: off + print("internal_dict: ", internal_dict.keys()) + debugger.HandleCommand(f"breakpoint command add -F {__name__}.breakpoint_function_wrapper --script-type python 1") + debugger.HandleCommand(f"type summary add -w nim -n sequence -F {__name__}.Sequence -x tySequence_+[[:alnum:]]+$") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SeqChildrenProvider -x tySequence_+[[:alnum:]]+$") + + debugger.HandleCommand(f"type 
summary add -w nim -n chararray -F {__name__}.CharArray -x char\s+[\d+]") + debugger.HandleCommand(f"type summary add -w nim -n array -F {__name__}.Array -x tyArray_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.ArrayChildrenProvider -x tyArray_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n string -F {__name__}.NimString NimStringDesc") + + debugger.HandleCommand(f"type summary add -w nim -n stringv2 -F {__name__}.NimString -x NimStringV2$") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.StringChildrenProvider -x NimStringV2$") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.StringChildrenProvider -x NimStringDesc$") + + debugger.HandleCommand(f"type summary add -w nim -n cstring -F {__name__}.NCSTRING NCSTRING") + + debugger.HandleCommand(f"type summary add -w nim -n object -F {__name__}.ObjectV2 -x tyObject_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.ObjectChildrenProvider -x tyObject_+[[:alnum:]]+_+[[:alnum:]]+$") + + debugger.HandleCommand(f"type summary add -w nim -n tframe -F {__name__}.ObjectV2 -x TFrame$") + + debugger.HandleCommand(f"type summary add -w nim -n rootobj -F {__name__}.ObjectV2 -x RootObj$") + + debugger.HandleCommand(f"type summary add -w nim -n enum -F {__name__}.Enum -x tyEnum_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n hashset -F {__name__}.HashSet -x tyObject_+HashSet_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.HashSetChildrenProvider -x tyObject_+HashSet_+[[:alnum:]]+") + + debugger.HandleCommand(f"type summary add -w nim -n rope -F {__name__}.Rope -x tyObject_+Rope[[:alnum:]]+_+[[:alnum:]]+") + + debugger.HandleCommand(f"type summary add -w nim -n setuint -F {__name__}.Set -x tySet_+tyInt_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetIntChildrenProvider -x 
tySet_+tyInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setint -F {__name__}.Set -x tySet_+tyInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setuint2 -F {__name__}.Set -x tySet_+tyUInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetUIntChildrenProvider -x tySet_+tyUInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetUIntChildrenProvider -x tySet_+tyInt_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setenum -F {__name__}.EnumSet -x tySet_+tyEnum_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetEnumChildrenProvider -x tySet_+tyEnum_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setchar -F {__name__}.Set -x tySet_+tyChar_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetCharChildrenProvider -x tySet_+tyChar_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n table -F {__name__}.Table -x tyObject_+Table_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.TableChildrenProvider -x tyObject_+Table_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n stringtable -F {__name__}.StringTable -x tyObject_+StringTableObj_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.StringTableChildrenProvider -x tyObject_+StringTableObj_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n tuple2 -F {__name__}.Tuple -x tyObject_+Tuple_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n tuple -F {__name__}.Tuple -x tyTuple_+[[:alnum:]]+") + + debugger.HandleCommand(f"type summary add -w nim -n float -F {__name__}.Float NF") + debugger.HandleCommand(f"type summary add -w nim -n float32 -F {__name__}.Float NF32") + debugger.HandleCommand(f"type summary add -w nim -n 
float64 -F {__name__}.Float NF64") + debugger.HandleCommand(f"type summary add -w nim -n integer -F {__name__}.Number -x NI") + debugger.HandleCommand(f"type summary add -w nim -n integer8 -F {__name__}.Number -x NI8") + debugger.HandleCommand(f"type summary add -w nim -n integer16 -F {__name__}.Number -x NI16") + debugger.HandleCommand(f"type summary add -w nim -n integer32 -F {__name__}.Number -x NI32") + debugger.HandleCommand(f"type summary add -w nim -n integer64 -F {__name__}.Number -x NI64") + debugger.HandleCommand(f"type summary add -w nim -n bool -F {__name__}.Bool -x bool") + debugger.HandleCommand(f"type summary add -w nim -n bool2 -F {__name__}.Bool -x NIM_BOOL") + debugger.HandleCommand(f"type summary add -w nim -n uinteger -F {__name__}.UnsignedNumber -x NU") + debugger.HandleCommand(f"type summary add -w nim -n uinteger8 -F {__name__}.UnsignedNumber -x NU8") + debugger.HandleCommand(f"type summary add -w nim -n uinteger16 -F {__name__}.UnsignedNumber -x NU16") + debugger.HandleCommand(f"type summary add -w nim -n uinteger32 -F {__name__}.UnsignedNumber -x NU32") + debugger.HandleCommand(f"type summary add -w nim -n uinteger64 -F {__name__}.UnsignedNumber -x NU64") + debugger.HandleCommand("type category enable nim") + debugger.HandleCommand(f"command script add -f {__name__}.echo echo") + # fmt: on diff --git a/tests/testCodex.nim b/tests/testCodex.nim index 5ddd38da..6a9b107e 100644 --- a/tests/testCodex.nim +++ b/tests/testCodex.nim @@ -1,18 +1,21 @@ import ./codex/teststores import ./codex/testblockexchange -import ./codex/teststorageproofs import ./codex/testasyncheapqueue import ./codex/testchunking +import ./codex/testlogutils import ./codex/testmanifest import ./codex/testnode import ./codex/teststorestream import ./codex/testpurchasing import ./codex/testsales import ./codex/testerasure -import ./codex/testproving import ./codex/testutils - -# to check that everything compiles -import ../codex +import ./codex/testclock +import 
./codex/testsystemclock +import ./codex/testvalidation +import ./codex/testasyncstreamwrapper +import ./codex/testmerkletree +import ./codex/testslots +import ./codex/testindexingstrategy {.warning[UnusedImport]: off.} diff --git a/tests/testCodex.nim.cfg b/tests/testCodex.nim.cfg deleted file mode 100644 index 0e237893..00000000 --- a/tests/testCodex.nim.cfg +++ /dev/null @@ -1 +0,0 @@ --d:chronicles_log_level=WARN diff --git a/tests/testContracts.nim b/tests/testContracts.nim index 0e8fde1d..4283c10a 100644 --- a/tests/testContracts.nim +++ b/tests/testContracts.nim @@ -1,8 +1,6 @@ -import ./contracts/testCollateral import ./contracts/testContracts import ./contracts/testMarket -import ./contracts/testProofs -import ./contracts/testInteractions +import ./contracts/testDeployment import ./contracts/testClock {.warning[UnusedImport]:off.} diff --git a/tests/testContracts.nim.cfg b/tests/testContracts.nim.cfg deleted file mode 100644 index 0e237893..00000000 --- a/tests/testContracts.nim.cfg +++ /dev/null @@ -1 +0,0 @@ --d:chronicles_log_level=WARN diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim index d58e461c..b1f81ef4 100644 --- a/tests/testIntegration.nim +++ b/tests/testIntegration.nim @@ -1,158 +1,11 @@ -import std/osproc -import std/os -import std/httpclient -import std/json -import std/strutils +import ./integration/testcli +import ./integration/testrestapi +import ./integration/testupdownload +import ./integration/testsales +import ./integration/testpurchasing +import ./integration/testblockexpiration +import ./integration/testmarketplace +import ./integration/testproofs +import ./integration/testecbug -import pkg/chronos -import ./ethertest -import ./contracts/time -import ./integration/nodes -import ./integration/tokens -import ./codex/helpers/eventually - -ethersuite "Integration tests": - - var node1, node2: NodeProcess - var baseurl1, baseurl2: string - var client: HttpClient - - let dataDir1 = getTempDir() / "Codex1" - let dataDir2 = 
getTempDir() / "Codex2" - - setup: - await provider.getSigner(accounts[0]).mint() - await provider.getSigner(accounts[1]).mint() - await provider.getSigner(accounts[1]).deposit() - - baseurl1 = "http://localhost:8080/api/codex/v1" - baseurl2 = "http://localhost:8081/api/codex/v1" - client = newHttpClient() - - node1 = startNode([ - "--api-port=8080", - "--data-dir=" & dataDir1, - "--nat=127.0.0.1", - "--disc-ip=127.0.0.1", - "--disc-port=8090", - "--persistence", - "--eth-account=" & $accounts[0] - ], debug = false) - - let - bootstrap = strip( - $(parseJson(client.get(baseurl1 & "/debug/info").body)["spr"]), - chars = {'"'}) - - node2 = startNode([ - "--api-port=8081", - "--data-dir=" & dataDir2, - "--nat=127.0.0.1", - "--disc-ip=127.0.0.1", - "--disc-port=8091", - "--bootstrap-node=" & bootstrap, - "--persistence", - "--eth-account=" & $accounts[1] - ], debug = false) - - teardown: - client.close() - node1.stop() - node2.stop() - - dataDir1.removeDir() - dataDir2.removeDir() - - test "nodes can print their peer information": - let info1 = client.get(baseurl1 & "/debug/info").body - let info2 = client.get(baseurl2 & "/debug/info").body - check info1 != info2 - - test "nodes should set chronicles log level": - client.headers = newHttpHeaders({ "Content-Type": "text/plain" }) - let filter = "/debug/chronicles/loglevel?level=DEBUG;TRACE:codex" - check client.request(baseurl1 & filter, httpMethod = HttpPost, body = "").status == "200 OK" - - test "node accepts file uploads": - let url = baseurl1 & "/upload" - let response = client.post(url, "some file contents") - check response.status == "200 OK" - - test "node handles new storage availability": - let url = baseurl1 & "/sales/availability" - let json = %*{"size": "0x1", "duration": "0x2", "minPrice": "0x3"} - check client.post(url, $json).status == "200 OK" - - test "node lists storage that is for sale": - let url = baseurl1 & "/sales/availability" - let json = %*{"size": "0x1", "duration": "0x2", "minPrice": "0x3"} 
- let availability = parseJson(client.post(url, $json).body) - let response = client.get(url) - check response.status == "200 OK" - check %*availability in parseJson(response.body) - - test "node handles storage request": - let cid = client.post(baseurl1 & "/upload", "some file contents").body - let url = baseurl1 & "/storage/request/" & cid - let json = %*{"duration": "0x1", "reward": "0x2"} - let response = client.post(url, $json) - check response.status == "200 OK" - - test "node retrieves purchase status": - let cid = client.post(baseurl1 & "/upload", "some file contents").body - let request = %*{"duration": "0x1", "reward": "0x2"} - let id = client.post(baseurl1 & "/storage/request/" & cid, $request).body - let response = client.get(baseurl1 & "/storage/purchases/" & id) - check response.status == "200 OK" - let json = parseJson(response.body) - check json["request"]["ask"]["duration"].getStr == "0x1" - check json["request"]["ask"]["reward"].getStr == "0x2" - - test "node remembers purchase status after restart": - let cid = client.post(baseurl1 & "/upload", "some file contents").body - let request = %*{"duration": "0x1", "reward": "0x2"} - let id = client.post(baseurl1 & "/storage/request/" & cid, $request).body - - proc getPurchase(id: string): JsonNode = - let response = client.get(baseurl1 & "/storage/purchases/" & id) - return parseJson(response.body).catch |? 
nil - - check eventually getPurchase(id){"state"}.getStr == "submitted" - - node1.restart() - - client.close() - client = newHttpClient() - - check eventually (not isNil getPurchase(id){"request"}{"ask"}) - check getPurchase(id){"request"}{"ask"}{"duration"}.getStr == "0x1" - check getPurchase(id){"request"}{"ask"}{"reward"}.getStr == "0x2" - - test "nodes negotiate contracts on the marketplace": - proc sell = - let json = %*{"size": "0xFFFFF", "duration": "0x200", "minPrice": "0x300"} - discard client.post(baseurl2 & "/sales/availability", $json) - - proc available: JsonNode = - client.get(baseurl2 & "/sales/availability").body.parseJson - - proc upload: string = - client.post(baseurl1 & "/upload", "some file contents").body - - proc buy(cid: string): string = - let expiry = ((waitFor provider.currentTime()) + 30).toHex - let json = %*{"duration": "0x1", "reward": "0x400", "expiry": expiry} - client.post(baseurl1 & "/storage/request/" & cid, $json).body - - proc finish(purchase: string): Future[JsonNode] {.async.} = - while true: - let response = client.get(baseurl1 & "/storage/purchases/" & purchase) - let json = parseJson(response.body) - if json["state"].getStr == "finished": return json - await sleepAsync(1.seconds) - - sell() - let purchase = waitFor upload().buy().finish() - - check purchase["error"].getStr == "" - check available().len == 0 +{.warning[UnusedImport]:off.} diff --git a/tests/testTaiko.nim b/tests/testTaiko.nim new file mode 100644 index 00000000..a799697b --- /dev/null +++ b/tests/testTaiko.nim @@ -0,0 +1,76 @@ +import std/times +import std/os +import std/json +import std/tempfiles +import pkg/chronos +import pkg/stint +import pkg/questionable +import pkg/questionable/results + +import ./asynctest +import ./integration/nodes + + +suite "Taiko L2 Integration Tests": + + var node1, node2: NodeProcess + + setup: + doAssert existsEnv("CODEX_ETH_PRIVATE_KEY"), "Key for Taiko account missing" + + node1 = startNode([ + "--data-dir=" & 
createTempDir("", ""), + "--api-port=8080", + "--nat=127.0.0.1", + "--disc-ip=127.0.0.1", + "--disc-port=8090", + "--persistence", + "--eth-provider=https://rpc.test.taiko.xyz" + ]) + node1.waitUntilStarted() + + let bootstrap = (!node1.client.info())["spr"].getStr() + + node2 = startNode([ + "--data-dir=" & createTempDir("", ""), + "--api-port=8081", + "--nat=127.0.0.1", + "--disc-ip=127.0.0.1", + "--disc-port=8091", + "--bootstrap-node=" & bootstrap, + "--persistence", + "--eth-provider=https://rpc.test.taiko.xyz" + ]) + node2.waitUntilStarted() + + teardown: + node1.stop() + node2.stop() + node1.removeDataDir() + node2.removeDataDir() + + test "node 1 buys storage from node 2": + discard node2.client.postAvailability( + size=0xFFFFF.u256, + duration=200.u256, + minPrice=300.u256, + maxCollateral=300.u256 + ) + let cid = !node1.client.upload("some file contents") + + echo " - requesting storage, expires in 5 minutes" + let expiry = getTime().toUnix().uint64 + 5 * 60 + let purchase = !node1.client.requestStorage( + cid, + duration=30.u256, + reward=400.u256, + proofProbability=3.u256, + collateral=200.u256, + expiry=expiry.u256 + ) + + echo " - waiting for request to start, timeout 5 minutes" + check eventually(node1.client.getPurchase(purchase).?state == success "started", timeout = 5 * 60 * 1000) + + echo " - waiting for request to finish, timeout 1 minute" + check eventually(node1.client.getPurchase(purchase).?state == success "finished", timeout = 1 * 60 * 1000) diff --git a/vendor/asynctest b/vendor/asynctest index a236a5f0..8e2f4e73 160000 --- a/vendor/asynctest +++ b/vendor/asynctest @@ -1 +1 @@ -Subproject commit a236a5f0f3031573ac2cb082b63dbf6e170e06e7 +Subproject commit 8e2f4e73b97123be0f0041c129942b32df23ecb1 diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth new file mode 160000 index 00000000..ed428767 --- /dev/null +++ b/vendor/codex-contracts-eth @@ -0,0 +1 @@ +Subproject commit ed428767b3323048533b4d576888f36372bd9b27 diff --git 
a/vendor/codex-storage-proofs-circuits b/vendor/codex-storage-proofs-circuits new file mode 160000 index 00000000..c03b4322 --- /dev/null +++ b/vendor/codex-storage-proofs-circuits @@ -0,0 +1 @@ +Subproject commit c03b43221d68e34bd5015a4e4ee1a0ad3299f8ef diff --git a/vendor/constantine b/vendor/constantine new file mode 160000 index 00000000..8367d7d1 --- /dev/null +++ b/vendor/constantine @@ -0,0 +1 @@ +Subproject commit 8367d7d19cdbba874aab961b70d272e742184c37 diff --git a/vendor/dagger-contracts b/vendor/dagger-contracts deleted file mode 160000 index 61b8f5fc..00000000 --- a/vendor/dagger-contracts +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 61b8f5fc352838866b0fe27b936323de45bf269c diff --git a/vendor/dnsclient.nim b/vendor/dnsclient.nim index fbb76f8a..23214235 160000 --- a/vendor/dnsclient.nim +++ b/vendor/dnsclient.nim @@ -1 +1 @@ -Subproject commit fbb76f8af8a33ab818184a7d4406d9fee20993be +Subproject commit 23214235d4784d24aceed99bbfe153379ea557c8 diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl index f4c4233d..99fcb340 160000 --- a/vendor/nim-bearssl +++ b/vendor/nim-bearssl @@ -1 +1 @@ -Subproject commit f4c4233de453cb7eac0ce3f3ffad6496295f83ab +Subproject commit 99fcb3405c55b27cfffbf60f5368c55da7346f23 diff --git a/vendor/nim-blscurve b/vendor/nim-blscurve index 0237e4e0..48d8668c 160000 --- a/vendor/nim-blscurve +++ b/vendor/nim-blscurve @@ -1 +1 @@ -Subproject commit 0237e4e0e914fc19359c18a66406d33bc942775c +Subproject commit 48d8668c5a9a350d3a7ee0c3713ef9a11980a40d diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles index 7631f7b2..c9c8e58e 160000 --- a/vendor/nim-chronicles +++ b/vendor/nim-chronicles @@ -1 +1 @@ -Subproject commit 7631f7b2ee03398cb1512a79923264e8f9410af6 +Subproject commit c9c8e58ec3f89b655a046c485f622f9021c68b61 diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 6525f4ce..035ae11b 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 6525f4ce1d1a7eba146e5f1a53f6f105077ae686 
+Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9 diff --git a/vendor/nim-circom-compat b/vendor/nim-circom-compat new file mode 160000 index 00000000..4467e310 --- /dev/null +++ b/vendor/nim-circom-compat @@ -0,0 +1 @@ +Subproject commit 4467e310b75aa0749ff28c1572a84ffce57d7c1c diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht new file mode 160000 index 00000000..63822e83 --- /dev/null +++ b/vendor/nim-codex-dht @@ -0,0 +1 @@ +Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6 diff --git a/vendor/nim-confutils b/vendor/nim-confutils index 0435e678..2028b416 160000 --- a/vendor/nim-confutils +++ b/vendor/nim-confutils @@ -1 +1 @@ -Subproject commit 0435e67832b6bb8dfdf0ddb102903e9d820206d2 +Subproject commit 2028b41602b3abf7c9bf450744efde7b296707a2 diff --git a/vendor/nim-contract-abi b/vendor/nim-contract-abi index b111c27b..61f8f59b 160000 --- a/vendor/nim-contract-abi +++ b/vendor/nim-contract-abi @@ -1 +1 @@ -Subproject commit b111c27b619fc1d81fb1c6942372824a18a71960 +Subproject commit 61f8f59b3917d8e27c6eb4330a6d8cf428e98b2d diff --git a/vendor/nim-datastore b/vendor/nim-datastore index 6c06a3b0..3ab6b84a 160000 --- a/vendor/nim-datastore +++ b/vendor/nim-datastore @@ -1 +1 @@ -Subproject commit 6c06a3b095d1935aaf5eec66295862c9c3b4bac5 +Subproject commit 3ab6b84a634a7b2ee8c0144f050bf5893cd47c17 diff --git a/vendor/nim-eth b/vendor/nim-eth index 5885f638..15a09fab 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 5885f638e47b8607683ef9e1e77fc21ce1aede44 +Subproject commit 15a09fab737d08a2545284c727199c377bb0f4b7 diff --git a/vendor/nim-ethers b/vendor/nim-ethers index e8592bb9..5b170adc 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit e8592bb92219f5fcec25af22bed474a4f95c0a54 +Subproject commit 5b170adcb1ffb1dbb273d1b7679bf3d9a08adb76 diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams index 1b561a9e..720fc5e5 160000 --- a/vendor/nim-faststreams +++ 
b/vendor/nim-faststreams @@ -1 +1 @@ -Subproject commit 1b561a9e71b6bdad1c1cdff753418906037e9d09 +Subproject commit 720fc5e5c8e428d9d0af618e1e27c44b42350309 diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils index e88e231d..be57dbc9 160000 --- a/vendor/nim-http-utils +++ b/vendor/nim-http-utils @@ -1 +1 @@ -Subproject commit e88e231dfcef4585fe3b2fbd9b664dbd28a88040 +Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45 diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc index 5a281760..0bf2bcbe 160000 --- a/vendor/nim-json-rpc +++ b/vendor/nim-json-rpc @@ -1 +1 @@ -Subproject commit 5a281760803907f4989cacf109b516381dfbbe11 +Subproject commit 0bf2bcbe74a18a3c7a709d57108bb7b51e748a92 diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization index e5b18fb7..bb53d49c 160000 --- a/vendor/nim-json-serialization +++ b/vendor/nim-json-serialization @@ -1 +1 @@ -Subproject commit e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4 +Subproject commit bb53d49caf2a6c6cf1df365ba84af93cdcfa7aa3 diff --git a/vendor/nim-leopard b/vendor/nim-leopard index ae043fd2..895ff24c 160000 --- a/vendor/nim-leopard +++ b/vendor/nim-leopard @@ -1 +1 @@ -Subproject commit ae043fd262d2cc2f46db4a9f2f8054e73167a970 +Subproject commit 895ff24ca6615d577acfb11811cdd5465f596c97 diff --git a/vendor/nim-leveldbstatic b/vendor/nim-leveldbstatic new file mode 160000 index 00000000..3cb21890 --- /dev/null +++ b/vendor/nim-leveldbstatic @@ -0,0 +1 @@ +Subproject commit 3cb21890d4dc29c579d309b94f60f51ee9633a6d diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index a3e9d1ed..b239791c 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit a3e9d1ed80c048cd5abc839cbe0863cefcedc702 +Subproject commit b239791c568d9f9a76fd66d2322b2754700b6cc5 diff --git a/vendor/nim-libp2p-dht b/vendor/nim-libp2p-dht deleted file mode 160000 index e4e7a3e1..00000000 --- a/vendor/nim-libp2p-dht +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 
e4e7a3e11fe635de3f15e37164b3ace96f588993 diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 743f81d4..6142e433 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit 743f81d4f6c6ebf0ac02389f2392ff8b4235bee5 +Subproject commit 6142e433fc8ea9b73379770a788017ac528d46ff diff --git a/vendor/nim-poseidon2 b/vendor/nim-poseidon2 new file mode 160000 index 00000000..0346982f --- /dev/null +++ b/vendor/nim-poseidon2 @@ -0,0 +1 @@ +Subproject commit 0346982f2c6891bcedd03d552af3a3bd57b2c1f9 diff --git a/vendor/nim-presto b/vendor/nim-presto index 3984431d..c17bfdda 160000 --- a/vendor/nim-presto +++ b/vendor/nim-presto @@ -1 +1 @@ -Subproject commit 3984431dc0fc829eb668e12e57e90542b041d298 +Subproject commit c17bfdda2c60cf5fadb043feb22e328b7659c719 diff --git a/vendor/nim-protobuf-serialization b/vendor/nim-protobuf-serialization new file mode 160000 index 00000000..28214b3e --- /dev/null +++ b/vendor/nim-protobuf-serialization @@ -0,0 +1 @@ +Subproject commit 28214b3e40c755a9886d2ec8f261ec48fbb6bec6 diff --git a/vendor/nim-results b/vendor/nim-results new file mode 160000 index 00000000..f3c666a2 --- /dev/null +++ b/vendor/nim-results @@ -0,0 +1 @@ +Subproject commit f3c666a272c69d70cb41e7245e7f6844797303ad diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1 index 5340cf18..2acbbdcc 160000 --- a/vendor/nim-secp256k1 +++ b/vendor/nim-secp256k1 @@ -1 +1 @@ -Subproject commit 5340cf188168d6afcafc8023770d880f067c0b2f +Subproject commit 2acbbdcc0e63002a013fff49f015708522875832 diff --git a/vendor/nim-serde b/vendor/nim-serde new file mode 160000 index 00000000..b1e5e5d3 --- /dev/null +++ b/vendor/nim-serde @@ -0,0 +1 @@ +Subproject commit b1e5e5d39a99ea56b750f6d9272dd319f4ad4291 diff --git a/vendor/nim-serialization b/vendor/nim-serialization index 493d18b8..384eb256 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit 493d18b8292fc03aa4f835fd825dea1183f97466 +Subproject commit 
384eb2561ee755446cff512a8e057325848b86a7 diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi index fda455cf..362e1bd9 160000 --- a/vendor/nim-sqlite3-abi +++ b/vendor/nim-sqlite3-abi @@ -1 +1 @@ -Subproject commit fda455cfea2df707dde052034411ce63de218453 +Subproject commit 362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3 diff --git a/vendor/nim-stew b/vendor/nim-stew index 0c379cf1..7afe7e3c 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 0c379cf1d8d3d9db07af108cc78ff542b2105914 +Subproject commit 7afe7e3c070758cac1f628e4330109f3ef6fc853 diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools index 8d408ac6..b3673c7a 160000 --- a/vendor/nim-taskpools +++ b/vendor/nim-taskpools @@ -1 +1 @@ -Subproject commit 8d408ac6cfc9c24ec8b7b65d5993e85050dcbaa9 +Subproject commit b3673c7a7a959ccacb393bd9b47e997bbd177f5a diff --git a/vendor/nim-testutils b/vendor/nim-testutils new file mode 160000 index 00000000..b56a5953 --- /dev/null +++ b/vendor/nim-testutils @@ -0,0 +1 @@ +Subproject commit b56a5953e37fc5117bd6ea6dfa18418c5e112815 diff --git a/vendor/nim-toml-serialization b/vendor/nim-toml-serialization index 4e15e00e..86d47713 160000 --- a/vendor/nim-toml-serialization +++ b/vendor/nim-toml-serialization @@ -1 +1 @@ -Subproject commit 4e15e00ed9e27a8d28b40b69ef06c6a4a388ae93 +Subproject commit 86d477136f105f04bfd0dd7c0e939593d81fc581 diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2 index 02c49b8a..b178f475 160000 --- a/vendor/nim-unittest2 +++ b/vendor/nim-unittest2 @@ -1 +1 @@ -Subproject commit 02c49b8a994dd3f9eddfaab45262f9b8fa507f8e +Subproject commit b178f47527074964f76c395ad0dfc81cf118f379 diff --git a/vendor/nim-websock b/vendor/nim-websock index 7b2ed397..2c3ae313 160000 --- a/vendor/nim-websock +++ b/vendor/nim-websock @@ -1 +1 @@ -Subproject commit 7b2ed397d6e4c37ea4df08ae82aeac7ff04cd180 +Subproject commit 2c3ae3137f3c9cb48134285bd4a47186fa51f0e8 diff --git a/vendor/nim-zlib b/vendor/nim-zlib index 74cdeb54..f34ca261 
160000 --- a/vendor/nim-zlib +++ b/vendor/nim-zlib @@ -1 +1 @@ -Subproject commit 74cdeb54b21bededb5a515d36f608bc1850555a2 +Subproject commit f34ca261efd90f118dc1647beefd2f7a69b05d93 diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index dc535cd4..b2e1fb02 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit dc535cd4627e6c1ec023ee6d6d0c3e5d66d414e5 +Subproject commit b2e1fb022f1ee800b439648953e92cc993c1264c diff --git a/vendor/nimcrypto b/vendor/nimcrypto index a5742a9a..24e006df 160000 --- a/vendor/nimcrypto +++ b/vendor/nimcrypto @@ -1 +1 @@ -Subproject commit a5742a9a214ac33f91615f3862c7b099aec43b00 +Subproject commit 24e006df85927f64916e60511620583b11403178 diff --git a/vendor/npeg b/vendor/npeg new file mode 160000 index 00000000..b15a10e3 --- /dev/null +++ b/vendor/npeg @@ -0,0 +1 @@ +Subproject commit b15a10e388b91b898c581dbbcb6a718d46b27d2f diff --git a/vendor/questionable b/vendor/questionable index 30e4184a..47692e0d 160000 --- a/vendor/questionable +++ b/vendor/questionable @@ -1 +1 @@ -Subproject commit 30e4184a99c8c1ba329925912d2c5d4b09acf8cc +Subproject commit 47692e0d923ada8f7f731275b2a87614c0150987 diff --git a/vendor/stint b/vendor/stint index 036c71d0..86621ece 160000 --- a/vendor/stint +++ b/vendor/stint @@ -1 +1 @@ -Subproject commit 036c71d06a6b22f8f967ba9d54afd2189c3872ca +Subproject commit 86621eced1dcfb5e25903019ebcfc76ed9128ec5 diff --git a/vendor/urls.rules b/vendor/urls.rules new file mode 100644 index 00000000..7636ff34 --- /dev/null +++ b/vendor/urls.rules @@ -0,0 +1,8 @@ +https://github.com/status-im/nim-libp2p-dht.git -> https://github.com/codex-storage/nim-codex-dht.git +https://github.com/markspanbroek/questionable -> https://github.com/codex-storage/questionable +https://github.com/status-im/questionable -> https://github.com/codex-storage/questionable +https://github.com/status-im/asynctest -> https://github.com/codex-storage/asynctest 
+https://github.com/status-im/nim-datastore -> https://github.com/codex-storage/nim-datastore +https://github.com/cheatfate/nimcrypto -> https://github.com/status-im/nimcrypto +protobufserialization -> protobuf_serialization +protobufserialization -> https://github.com/status-im/nim-protobuf-serialization