Merge branch 'master' into batch-utils
This commit is contained in:
commit
f351a44d8b
|
@ -0,0 +1,6 @@
|
|||
.github
|
||||
build
|
||||
docs
|
||||
metrics
|
||||
nimcache
|
||||
tests
|
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
|
@ -9,17 +9,26 @@ inputs:
|
|||
cpu:
|
||||
description: "CPU to build for"
|
||||
default: "amd64"
|
||||
nim_branch:
|
||||
nim_version:
|
||||
description: "Nim version"
|
||||
default: "version-1-6"
|
||||
rust_version:
|
||||
description: "Rust version"
|
||||
default: "1.78.0"
|
||||
shell:
|
||||
description: "Shell to run commands in"
|
||||
default: "bash --noprofile --norc -e -o pipefail"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: APT (Linux amd64)
|
||||
if: inputs.os == 'linux' && inputs.cpu == 'amd64'
|
||||
- name: Rust (Linux)
|
||||
if: inputs.os == 'linux'
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
run: |
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${{ inputs.rust_version }} -y
|
||||
|
||||
- name: APT (Linux amd64/arm64)
|
||||
if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
run: |
|
||||
sudo apt-fast update -qq
|
||||
|
@ -45,6 +54,7 @@ runs:
|
|||
if: inputs.os == 'windows' && inputs.cpu == 'amd64'
|
||||
uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
path-type: inherit
|
||||
msystem: UCRT64
|
||||
install: >
|
||||
base-devel
|
||||
|
@ -52,11 +62,13 @@ runs:
|
|||
mingw-w64-ucrt-x86_64-toolchain
|
||||
mingw-w64-ucrt-x86_64-cmake
|
||||
mingw-w64-ucrt-x86_64-ntldd-git
|
||||
mingw-w64-ucrt-x86_64-rust
|
||||
|
||||
- name: MSYS2 (Windows i386)
|
||||
if: inputs.os == 'windows' && inputs.cpu == 'i386'
|
||||
uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
path-type: inherit
|
||||
msystem: MINGW32
|
||||
install: >
|
||||
base-devel
|
||||
|
@ -64,6 +76,13 @@ runs:
|
|||
mingw-w64-i686-toolchain
|
||||
mingw-w64-i686-cmake
|
||||
mingw-w64-i686-ntldd-git
|
||||
mingw-w64-i686-rust
|
||||
|
||||
- name: MSYS2 (Windows All) - Downgrade to gcc 13
|
||||
if: inputs.os == 'windows'
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
run: |
|
||||
pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
|
||||
|
||||
- name: Derive environment variables
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
|
@ -73,15 +92,10 @@ runs:
|
|||
printf "'%s'" "$quoted"
|
||||
}
|
||||
|
||||
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
|
||||
PLATFORM=x64
|
||||
else
|
||||
PLATFORM=x86
|
||||
fi
|
||||
echo "PLATFORM=${PLATFORM}" >> ${GITHUB_ENV}
|
||||
[[ '${{ inputs.cpu }}' == 'i386' ]] && echo "ARCH_OVERRIDE=ARCH_OVERRIDE=x86" >> ${GITHUB_ENV}
|
||||
|
||||
# Stack usage on Linux amd64
|
||||
if [[ '${{ inputs.os }}' == 'linux' && '${{ inputs.cpu }}' == 'amd64' ]]; then
|
||||
# Stack usage on Linux amd64/arm64
|
||||
if [[ '${{ inputs.os }}' == 'linux' && ('${{ inputs.cpu }}' == 'amd64' || '${{ inputs.cpu }}' == 'arm64') ]]; then
|
||||
NIMFLAGS="${NIMFLAGS} -d:limitStackUsage"
|
||||
echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV
|
||||
fi
|
||||
|
@ -135,35 +149,28 @@ runs:
|
|||
# Use all available CPUs for build process
|
||||
ncpu=""
|
||||
case '${{ inputs.os }}' in
|
||||
'linux')
|
||||
ncpu=$(nproc)
|
||||
;;
|
||||
'macos')
|
||||
ncpu=$(sysctl -n hw.ncpu)
|
||||
;;
|
||||
'windows')
|
||||
ncpu=${NUMBER_OF_PROCESSORS}
|
||||
;;
|
||||
'linux') ncpu=$(nproc) ;;
|
||||
'macos') ncpu=$(sysctl -n hw.ncpu) ;;
|
||||
'windows') ncpu=${NUMBER_OF_PROCESSORS} ;;
|
||||
esac
|
||||
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
|
||||
echo "ncpu=${ncpu}" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Restore Nim toolchain binaries from cache
|
||||
id: nim-cache
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: NimBinaries
|
||||
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
|
||||
restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
|
||||
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
|
||||
restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}
|
||||
|
||||
- name: Set NIM_COMMIT
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
run: echo "NIM_COMMIT=${{ inputs.nim_branch }}" >> ${GITHUB_ENV}
|
||||
run: echo "NIM_COMMIT=${{ inputs.nim_version }}" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Build Nim and Codex dependencies
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
run: |
|
||||
make -j${ncpu} CI_CACHE=NimBinaries ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1 update
|
||||
make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update
|
||||
echo
|
||||
./env.sh nim --version
|
||||
|
||||
|
|
|
@ -0,0 +1,77 @@
|
|||
Tips for shorter build times
|
||||
----------------------------
|
||||
|
||||
### Runner availability ###
|
||||
|
||||
Currently, the biggest bottleneck when optimizing workflows is the availability
|
||||
of Windows and macOS runners. Therefore, anything that reduces the time spent in
|
||||
Windows or macOS jobs will have a positive impact on the time waiting for
|
||||
runners to become available. The usage limits for Github Actions are [described
|
||||
here][limits]. You can see a breakdown of runner usage for your jobs in the
|
||||
Github Actions tab ([example][usage]).
|
||||
|
||||
### Windows is slow ###
|
||||
|
||||
Performing git operations and compilation are both slow on Windows. This can
|
||||
easily mean that a Windows job takes twice as long as a Linux job. Therefore it
|
||||
makes sense to use a Windows runner only for testing Windows compatibility, and
|
||||
nothing else. Testing compatibility with other versions of Nim, code coverage
|
||||
analysis, etc. are therefore better performed on a Linux runner.
|
||||
|
||||
### Parallelization ###
|
||||
|
||||
Breaking up a long build job into several jobs that you run in parallel can have
|
||||
a positive impact on the wall clock time that a workflow runs. For instance, you
|
||||
might consider running unit tests and integration tests in parallel. Keep in
|
||||
mind however that availability of macOS and Windows runners is the biggest
|
||||
bottleneck. If you split a Windows job into two jobs, you now need to wait for
|
||||
two Windows runners to become available! Therefore parallelization often only
|
||||
makes sense for Linux jobs.
|
||||
|
||||
### Refactoring ###
|
||||
|
||||
As with any code, complex workflows are hard to read and change. You can use
|
||||
[composite actions][composite] and [reusable workflows][reusable] to refactor
|
||||
complex workflows.
|
||||
|
||||
### Steps for measuring time ###
|
||||
|
||||
Breaking up steps allows you to see the time spent in each part. For instance,
|
||||
instead of having one step where all tests are performed, you might consider
|
||||
having separate steps for e.g. unit tests and integration tests, so that you can
|
||||
see how much time is spent in each.
|
||||
|
||||
### Fix slow tests ###
|
||||
|
||||
Try to avoid slow unit tests. They not only slow down continuous integration,
|
||||
but also local development. If you encounter slow tests you can consider
|
||||
reworking them to stub out the slow parts that are not under test, or use
|
||||
smaller data structures for the test.
|
||||
|
||||
You can use [unittest2][unittest2] together with the environment variable
|
||||
`NIMTEST_TIMING=true` to show how much time is spent in every test
|
||||
([reference][testtime]).
|
||||
|
||||
### Caching ###
|
||||
|
||||
Ensure that caches are updated over time. For instance if you cache the latest
|
||||
version of the Nim compiler, then you want to update the cache when a new
|
||||
version of the compiler is released. See also the documentation
|
||||
for the [cache action][cache].
|
||||
|
||||
### Fail fast ###
|
||||
|
||||
By default a workflow fails fast: if one job fails, the rest are cancelled. This
|
||||
might seem inconvenient, because when you're debugging an issue you often want
|
||||
to know whether you introduced a failure on all platforms, or only on a single
|
||||
one. You might be tempted to disable fail-fast, but keep in mind that this keeps
|
||||
runners busy for longer on a workflow that you know is going to fail anyway.
|
||||
Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
|
||||
|
||||
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
|
||||
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
|
||||
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
|
||||
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache
|
||||
[unittest2]: https://github.com/status-im/nim-unittest2
|
||||
[testtime]: https://github.com/status-im/nim-unittest2/pull/12
|
||||
[limits]: https://docs.github.com/en/actions/learn-github-actions/usage-limits-billing-and-administration#usage-limits
|
|
@ -0,0 +1,88 @@
|
|||
name: Reusable - CI
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
matrix:
|
||||
type: string
|
||||
cache_nonce:
|
||||
default: '0'
|
||||
description: Allows for easily busting actions/cache caches
|
||||
required: false
|
||||
type: string
|
||||
|
||||
env:
|
||||
cache_nonce: ${{ inputs.cache_nonce }}
|
||||
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
matrix:
|
||||
include: ${{ fromJson(inputs.matrix) }}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: ${{ matrix.shell }} {0}
|
||||
|
||||
name: '${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.tests }}'
|
||||
runs-on: ${{ matrix.builder }}
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Setup Nimbus Build System
|
||||
uses: ./.github/actions/nimbus-build-system
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
shell: ${{ matrix.shell }}
|
||||
nim_version: ${{ matrix.nim_version }}
|
||||
|
||||
## Part 1 Tests ##
|
||||
- name: Unit tests
|
||||
if: matrix.tests == 'unittest' || matrix.tests == 'all'
|
||||
run: make -j${ncpu} test
|
||||
|
||||
# workaround for https://github.com/NomicFoundation/hardhat/issues/3877
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 18.15
|
||||
|
||||
- name: Start Ethereum node with Codex contracts
|
||||
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'all'
|
||||
working-directory: vendor/codex-contracts-eth
|
||||
env:
|
||||
MSYS2_PATH_TYPE: inherit
|
||||
run: |
|
||||
npm install
|
||||
npm start &
|
||||
|
||||
## Part 2 Tests ##
|
||||
- name: Contract tests
|
||||
if: matrix.tests == 'contract' || matrix.tests == 'all'
|
||||
run: make -j${ncpu} testContracts
|
||||
|
||||
## Part 3 Tests ##
|
||||
- name: Integration tests
|
||||
if: matrix.tests == 'integration' || matrix.tests == 'all'
|
||||
run: make -j${ncpu} testIntegration
|
||||
|
||||
- name: Upload integration tests log files
|
||||
uses: actions/upload-artifact@v4
|
||||
if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
|
||||
with:
|
||||
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
|
||||
path: tests/integration/logs/
|
||||
retention-days: 1
|
||||
|
||||
status:
|
||||
if: always()
|
||||
needs: [build]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }}
|
||||
run: exit 1
|
|
@ -1,106 +1,74 @@
|
|||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
cache_nonce: 0 # Allows for easily busting actions/cache caches
|
||||
nim_version: pinned
|
||||
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [linux, macos, windows]
|
||||
include:
|
||||
- os: linux
|
||||
builder: ubuntu-latest
|
||||
shell: bash --noprofile --norc -e -o pipefail
|
||||
- os: macos
|
||||
builder: macos-latest
|
||||
shell: bash --noprofile --norc -e -o pipefail
|
||||
- os: windows
|
||||
builder: windows-latest
|
||||
shell: msys2
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: ${{ matrix.shell }} {0}
|
||||
|
||||
name: '${{ matrix.os }}'
|
||||
runs-on: ${{ matrix.builder }}
|
||||
timeout-minutes: 80
|
||||
matrix:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.matrix.outputs.matrix }}
|
||||
cache_nonce: ${{ env.cache_nonce }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
- name: Compute matrix
|
||||
id: matrix
|
||||
uses: fabiocaccamo/create-matrix-action@v4
|
||||
with:
|
||||
matrix: |
|
||||
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {amd64}, builder {macos-13}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
|
||||
os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
|
||||
os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
|
||||
|
||||
- name: Setup Nimbus Build System
|
||||
uses: ./.github/actions/nimbus-build-system
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
shell: ${{ matrix.shell }}
|
||||
|
||||
- name: Unit tests
|
||||
run: make -j${ncpu} test
|
||||
|
||||
- name: Start Ethereum node with Codex contracts
|
||||
working-directory: vendor/dagger-contracts
|
||||
run: |
|
||||
if [[ '${{ matrix.os }}' == 'windows' ]]; then
|
||||
export PATH="${PATH}:/c/program files/nodejs"
|
||||
fi
|
||||
npm install
|
||||
npm start &
|
||||
|
||||
- name: Contract tests
|
||||
run: make -j${ncpu} testContracts
|
||||
|
||||
- name: Integration tests
|
||||
run: make -j${ncpu} testIntegration
|
||||
build:
|
||||
needs: matrix
|
||||
uses: ./.github/workflows/ci-reusable.yml
|
||||
with:
|
||||
matrix: ${{ needs.matrix.outputs.matrix }}
|
||||
cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}
|
||||
|
||||
coverage:
|
||||
continue-on-error: true
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Setup Nimbus Build System
|
||||
uses: ./.github/actions/nimbus-build-system
|
||||
with:
|
||||
os: linux
|
||||
nim_version: ${{ env.nim_version }}
|
||||
|
||||
- name: Generate coverage data
|
||||
run: make -j${ncpu} coverage
|
||||
run: |
|
||||
# make -j${ncpu} coverage
|
||||
make -j${ncpu} coverage-script
|
||||
shell: bash
|
||||
|
||||
- name: Upload coverage data to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
directory: ./coverage/
|
||||
fail_ci_if_error: true
|
||||
files: ./coverage/coverage.f.info
|
||||
flags: unittests
|
||||
name: codecov-umbrella
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
verbose: true
|
||||
|
||||
nim_1_2:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Setup Nimbus Build System
|
||||
uses: ./.github/actions/nimbus-build-system
|
||||
with:
|
||||
os: linux
|
||||
nim_branch: version-1-2
|
||||
|
||||
- name: Unit tests
|
||||
run: make -j${ncpu} test
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
name: Docker - Dist-Tests
|
||||
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
tags:
|
||||
- 'v*.*.*'
|
||||
paths-ignore:
|
||||
- '**/*.md'
|
||||
- '.gitignore'
|
||||
- '.github/**'
|
||||
- '!.github/workflows/docker-dist-tests.yml'
|
||||
- '!.github/workflows/docker-reusable.yml'
|
||||
- 'docker/**'
|
||||
- '!docker/codex.Dockerfile'
|
||||
- '!docker/docker-entrypoint.sh'
|
||||
workflow_dispatch:
|
||||
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
name: Build and Push
|
||||
uses: ./.github/workflows/docker-reusable.yml
|
||||
with:
|
||||
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true'
|
||||
nat_ip_auto: true
|
||||
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
|
||||
tag_suffix: dist-tests
|
||||
continuous_tests_list: PeersTest HoldMyBeerTest
|
||||
continuous_tests_duration: 12h
|
||||
secrets: inherit
|
|
@ -0,0 +1,267 @@
|
|||
name: Reusable - Docker
|
||||
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
docker_file:
|
||||
default: docker/codex.Dockerfile
|
||||
description: Dockerfile
|
||||
required: false
|
||||
type: string
|
||||
docker_repo:
|
||||
default: codexstorage/nim-codex
|
||||
description: DockerHub repository
|
||||
required: false
|
||||
type: string
|
||||
make_parallel:
|
||||
default: 4
|
||||
description: Make parallel
|
||||
required: false
|
||||
type: number
|
||||
nimflags:
|
||||
default: '-d:disableMarchNative'
|
||||
description: Nim flags for builds
|
||||
required: false
|
||||
type: string
|
||||
nat_ip_auto:
|
||||
default: false
|
||||
description: Enable NAT IP auto
|
||||
required: false
|
||||
type: boolean
|
||||
tag_latest:
|
||||
default: true
|
||||
description: Set latest tag for Docker images
|
||||
required: false
|
||||
type: boolean
|
||||
tag_sha:
|
||||
default: true
|
||||
description: Set Git short commit as Docker tag
|
||||
required: false
|
||||
type: boolean
|
||||
tag_suffix:
|
||||
default: ''
|
||||
description: Suffix for Docker images tag
|
||||
required: false
|
||||
type: string
|
||||
continuous_tests_list:
|
||||
default: ''
|
||||
description: Continuous Tests list
|
||||
required: false
|
||||
type: string
|
||||
continuous_tests_duration:
|
||||
default: 48h
|
||||
description: Continuous Tests duration
|
||||
required: false
|
||||
type: string
|
||||
|
||||
|
||||
env:
|
||||
# Build
|
||||
DOCKER_FILE: ${{ inputs.docker_file }}
|
||||
DOCKER_REPO: ${{ inputs.docker_repo }}
|
||||
MAKE_PARALLEL: ${{ inputs.make_parallel }}
|
||||
NIMFLAGS: ${{ inputs.nimflags }}
|
||||
NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
|
||||
TAG_LATEST: ${{ inputs.tag_latest }}
|
||||
TAG_SHA: ${{ inputs.tag_sha }}
|
||||
TAG_SUFFIX: ${{ inputs.tag_suffix }}
|
||||
# Tests
|
||||
CONTINUOUS_TESTS_SOURCE: codex-storage/cs-codex-dist-tests
|
||||
CONTINUOUS_TESTS_BRANCH: master
|
||||
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
|
||||
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
|
||||
CONTINUOUS_TESTS_NAMEPREFIX: c-tests-ci
|
||||
|
||||
|
||||
jobs:
|
||||
# Build platform specific image
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
target:
|
||||
- os: linux
|
||||
arch: amd64
|
||||
- os: linux
|
||||
arch: arm64
|
||||
include:
|
||||
- target:
|
||||
os: linux
|
||||
arch: amd64
|
||||
builder: ubuntu-22.04
|
||||
- target:
|
||||
os: linux
|
||||
arch: arm64
|
||||
builder: buildjet-4vcpu-ubuntu-2204-arm
|
||||
|
||||
name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }}
|
||||
runs-on: ${{ matrix.builder }}
|
||||
env:
|
||||
PLATFORM: ${{ format('{0}/{1}', 'linux', matrix.target.arch) }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Docker - Meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.DOCKER_REPO }}
|
||||
|
||||
- name: Docker - Set up Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Docker - Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Docker - Build and Push by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_FILE }}
|
||||
platforms: ${{ env.PLATFORM }}
|
||||
push: true
|
||||
build-args: |
|
||||
MAKE_PARALLEL=${{ env.MAKE_PARALLEL }}
|
||||
NIMFLAGS=${{ env.NIMFLAGS }}
|
||||
NAT_IP_AUTO=${{ env.NAT_IP_AUTO }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
outputs: type=image,name=${{ env.DOCKER_REPO }},push-by-digest=true,name-canonical=true,push=true
|
||||
|
||||
- name: Docker - Export digest
|
||||
run: |
|
||||
mkdir -p /tmp/digests
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "/tmp/digests/${digest#sha256:}"
|
||||
|
||||
- name: Docker - Upload digest
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: digests-${{ matrix.target.arch }}
|
||||
path: /tmp/digests/*
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
|
||||
|
||||
# Publish multi-platform image
|
||||
publish:
|
||||
name: Publish multi-platform image
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.meta.outputs.version }}
|
||||
needs: build
|
||||
steps:
|
||||
- name: Docker - Variables
|
||||
run: |
|
||||
# Adjust custom suffix when set
|
||||
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
|
||||
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
|
||||
fi
|
||||
# Disable SHA tags on tagged release
|
||||
if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
|
||||
echo "TAG_SHA=false" >>$GITHUB_ENV
|
||||
fi
|
||||
# Handle latest and latest-custom using raw
|
||||
if [[ ${{ env.TAG_SHA }} == "false" ]]; then
|
||||
echo "TAG_LATEST=false" >>$GITHUB_ENV
|
||||
echo "TAG_RAW=true" >>$GITHUB_ENV
|
||||
if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
|
||||
echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
|
||||
else
|
||||
echo "TAG_RAW_VALUE=latest-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
|
||||
fi
|
||||
else
|
||||
echo "TAG_RAW=false" >>$GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Docker - Download digests
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: digests-*
|
||||
merge-multiple: true
|
||||
path: /tmp/digests
|
||||
|
||||
- name: Docker - Set up Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Docker - Meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.DOCKER_REPO }}
|
||||
flavor: |
|
||||
latest=${{ env.TAG_LATEST }}
|
||||
suffix=${{ env.TAG_SUFFIX }},onlatest=true
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=raw,enable=${{ env.TAG_RAW }},value=latest
|
||||
type=sha,enable=${{ env.TAG_SHA }}
|
||||
|
||||
- name: Docker - Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Docker - Create manifest list and push
|
||||
working-directory: /tmp/digests
|
||||
run: |
|
||||
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
|
||||
|
||||
- name: Docker - Inspect image
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
|
||||
|
||||
|
||||
# Compute Continuous Tests inputs
|
||||
compute-tests-inputs:
|
||||
name: Compute Continuous Tests list
|
||||
if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: publish
|
||||
outputs:
|
||||
source: ${{ steps.compute.outputs.source }}
|
||||
branch: ${{ steps.compute.outputs.branch }}
|
||||
codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }}
|
||||
nameprefix: ${{ steps.compute.outputs.nameprefix }}
|
||||
continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
|
||||
continuous_tests_duration: ${{ steps.compute.outputs.continuous_tests_duration }}
|
||||
continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
|
||||
workflow_source: ${{ steps.compute.outputs.workflow_source }}
|
||||
steps:
|
||||
- name: Compute Continuous Tests list
|
||||
id: compute
|
||||
run: |
|
||||
echo "source=${{ format('{0}/{1}', github.server_url, env.CONTINUOUS_TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
|
||||
echo "branch=${{ env.CONTINUOUS_TESTS_BRANCH }}" >> "$GITHUB_OUTPUT"
|
||||
echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"
|
||||
echo "nameprefix=$(awk '{ print tolower($0) }' <<< ${{ env.CONTINUOUS_TESTS_NAMEPREFIX }})" >> "$GITHUB_OUTPUT"
|
||||
echo "continuous_tests_list=$(jq -cR 'split(" ")' <<< '${{ env.CONTINUOUS_TESTS_LIST }}')" >> "$GITHUB_OUTPUT"
|
||||
echo "continuous_tests_duration=${{ env.CONTINUOUS_TESTS_DURATION }}" >> "$GITHUB_OUTPUT"
|
||||
echo "workflow_source=${{ env.CONTINUOUS_TESTS_SOURCE }}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
|
||||
# Run Continuous Tests
|
||||
run-tests:
|
||||
name: Run Continuous Tests
|
||||
needs: [publish, compute-tests-inputs]
|
||||
strategy:
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
tests: ${{ fromJSON(needs.compute-tests-inputs.outputs.continuous_tests_list) }}
|
||||
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
|
||||
with:
|
||||
source: ${{ needs.compute-tests-inputs.outputs.source }}
|
||||
branch: ${{ needs.compute-tests-inputs.outputs.branch }}
|
||||
codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
|
||||
nameprefix: ${{ needs.compute-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
|
||||
tests_filter: ${{ matrix.tests }}
|
||||
tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
|
||||
workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
|
||||
secrets: inherit
|
|
@ -0,0 +1,28 @@
|
|||
name: Docker
|
||||
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
tags:
|
||||
- 'v*.*.*'
|
||||
paths-ignore:
|
||||
- '**/*.md'
|
||||
- '.gitignore'
|
||||
- '.github/**'
|
||||
- '!.github/workflows/docker.yml'
|
||||
- '!.github/workflows/docker-reusable.yml'
|
||||
- 'docker/**'
|
||||
- '!docker/codex.Dockerfile'
|
||||
- '!docker/docker-entrypoint.sh'
|
||||
workflow_dispatch:
|
||||
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
name: Build and Push
|
||||
uses: ./.github/workflows/docker-reusable.yml
|
||||
with:
|
||||
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
|
||||
secrets: inherit
|
|
@ -0,0 +1,65 @@
|
|||
name: OpenAPI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
paths:
|
||||
- 'openapi.yaml'
|
||||
- '.github/workflows/docs.yml'
|
||||
pull_request:
|
||||
branches:
|
||||
- '**'
|
||||
paths:
|
||||
- 'openapi.yaml'
|
||||
- '.github/workflows/docs.yml'
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: '0'
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 18
|
||||
|
||||
- name: Lint OpenAPI
|
||||
shell: bash
|
||||
run: npx @redocly/cli lint openapi.yaml
|
||||
|
||||
deploy:
|
||||
name: Deploy
|
||||
runs-on: ubuntu-latest
|
||||
if: github.ref == 'refs/heads/master'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: '0'
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 18
|
||||
|
||||
- name: Build OpenAPI
|
||||
shell: bash
|
||||
run: npx @redocly/cli build-docs openapi.yaml --output "openapi/index.html" --title "Codex API"
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: './openapi'
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: actions/deploy-pages@v4
|
|
@ -0,0 +1,30 @@
|
|||
name: Nim matrix
|
||||
|
||||
on:
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
cache_nonce: 0 # Allows for easily busting actions/cache caches
|
||||
nim_version: pinned
|
||||
|
||||
jobs:
|
||||
matrix:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.matrix.outputs.matrix }}
|
||||
cache_nonce: ${{ env.cache_nonce }}
|
||||
steps:
|
||||
- name: Compute matrix
|
||||
id: matrix
|
||||
uses: fabiocaccamo/create-matrix-action@v4
|
||||
with:
|
||||
matrix: |
|
||||
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
|
||||
build:
|
||||
needs: matrix
|
||||
uses: ./.github/workflows/ci-reusable.yml
|
||||
with:
|
||||
matrix: ${{ needs.matrix.outputs.matrix }}
|
||||
cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}
|
|
@ -0,0 +1,158 @@
|
|||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*.*.*'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
cache_nonce: 0 # Allows for easily busting actions/cache caches
|
||||
nim_version: pinned
|
||||
rust_version: 1.78.0
|
||||
binary_base: codex
|
||||
build_dir: build
|
||||
nim_flags: '-d:verify_circuit=true'
|
||||
windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'
|
||||
|
||||
jobs:
|
||||
# Matrix
|
||||
matrix:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.matrix.outputs.matrix }}
|
||||
steps:
|
||||
- name: Compute matrix
|
||||
id: matrix
|
||||
uses: fabiocaccamo/create-matrix-action@v4
|
||||
with:
|
||||
matrix: |
|
||||
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {linux}, cpu {arm64}, builder {buildjet-4vcpu-ubuntu-2204-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
|
||||
|
||||
# Build
|
||||
build:
|
||||
needs: matrix
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include: ${{fromJson(needs.matrix.outputs.matrix)}}
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: ${{ matrix.shell }} {0}
|
||||
|
||||
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}
|
||||
runs-on: ${{ matrix.builder }}
|
||||
timeout-minutes: 80
|
||||
steps:
|
||||
- name: Release - Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Release - Setup Nimbus Build System
|
||||
uses: ./.github/actions/nimbus-build-system
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
cpu: ${{ matrix.cpu }}
|
||||
shell: ${{ matrix.shell }}
|
||||
nim_version: ${{ matrix.nim_version }}
|
||||
rust_version: ${{ matrix.rust_version }}
|
||||
|
||||
- name: Release - Compute binary name
|
||||
run: |
|
||||
case ${{ matrix.os }} in
|
||||
linux*) os_name="linux" ;;
|
||||
macos*) os_name="darwin" ;;
|
||||
windows*) os_name="windows" ;;
|
||||
esac
|
||||
binary="${{ env.binary_base }}-${{ github.ref_name }}-${os_name}-${{ matrix.cpu }}"
|
||||
[[ ${os_name} == "windows" ]] && binary="${binary}.exe"
|
||||
echo "binary=${binary}" >>$GITHUB_ENV
|
||||
|
||||
- name: Release - Build
|
||||
run: |
|
||||
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.binary }} ${{ env.nim_flags }}"
|
||||
|
||||
- name: Release - Libraries
|
||||
run: |
|
||||
if [[ "${{ matrix.os }}" == "windows" ]]; then
|
||||
for lib in ${{ env.windows_libs }}; do
|
||||
cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
|
||||
done
|
||||
fi
|
||||
|
||||
- name: Release - Upload build artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: release-${{ env.binary }}
|
||||
path: ${{ env.build_dir }}/
|
||||
retention-days: 1
|
||||
|
||||
# Release
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: success() || failure()
|
||||
steps:
|
||||
- name: Release - Download binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: release*
|
||||
merge-multiple: true
|
||||
path: /tmp/release
|
||||
|
||||
- name: Release - Compress and checksum
|
||||
run: |
|
||||
cd /tmp/release
|
||||
checksum() {
|
||||
arc="${1}"
|
||||
sha256sum "${arc}" >"${arc}.sha256"
|
||||
}
|
||||
|
||||
# Compress and prepare
|
||||
for file in *; do
|
||||
# Exclude libraries
|
||||
if [[ "${file}" != *".dll"* ]]; then
|
||||
if [[ "${file}" == *".exe"* ]]; then
|
||||
|
||||
# Windows - binary only
|
||||
arc="${file%.*}.zip"
|
||||
zip "${arc}" "${file}"
|
||||
checksum "${arc}"
|
||||
|
||||
# Windows - binary and libs
|
||||
arc="${file%.*}-libs.zip"
|
||||
zip "${arc}" "${file}" ${{ env.windows_libs }}
|
||||
rm -f "${file}" ${{ env.windows_libs }}
|
||||
checksum "${arc}"
|
||||
else
|
||||
|
||||
# Linux/macOS
|
||||
arc="${file}.tar.gz"
|
||||
chmod 755 "${file}"
|
||||
tar cfz "${arc}" "${file}"
|
||||
rm -f "${file}"
|
||||
checksum "${arc}"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Release - Upload compressed artifacts and checksums
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: archives-and-checksums
|
||||
path: /tmp/release/
|
||||
retention-days: 1
|
||||
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
files: |
|
||||
/tmp/release/*
|
||||
make_latest: true
|
|
@ -3,6 +3,7 @@
|
|||
!*.*
|
||||
*.exe
|
||||
|
||||
!LICENSE*
|
||||
!Makefile
|
||||
|
||||
nimcache/
|
||||
|
@ -15,6 +16,8 @@ coverage/
|
|||
|
||||
# Nimble packages
|
||||
/vendor/.nimble
|
||||
/vendor/packages/
|
||||
# /vendor/*/
|
||||
|
||||
# Nimble user files
|
||||
nimble.develop
|
||||
|
@ -23,6 +26,9 @@ nimble.paths
|
|||
# vscode
|
||||
.vscode
|
||||
|
||||
# JetBrain's IDEs
|
||||
.idea
|
||||
|
||||
# Each developer can create a personal .env file with
|
||||
# local settings overrides (e.g. WEB3_URL)
|
||||
.env
|
||||
|
@ -30,3 +36,8 @@ nimble.paths
|
|||
.update.timestamp
|
||||
codex.nims
|
||||
nimbus-build-system.paths
|
||||
docker/hostdatadir
|
||||
docker/prometheus-data
|
||||
.DS_Store
|
||||
nim.cfg
|
||||
tests/integration/logs
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
branch = master
|
||||
[submodule "vendor/nim-libp2p"]
|
||||
path = vendor/nim-libp2p
|
||||
url = https://github.com/status-im/nim-libp2p.git
|
||||
url = https://github.com/vacp2p/nim-libp2p.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nimcrypto"]
|
||||
|
@ -133,10 +133,6 @@
|
|||
url = https://github.com/status-im/nim-websock.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/dagger-contracts"]
|
||||
path = vendor/dagger-contracts
|
||||
url = https://github.com/status-im/dagger-contracts
|
||||
ignore = dirty
|
||||
[submodule "vendor/nim-contract-abi"]
|
||||
path = vendor/nim-contract-abi
|
||||
url = https://github.com/status-im/nim-contract-abi
|
||||
|
@ -168,9 +164,9 @@
|
|||
[submodule "vendor/nim-leopard"]
|
||||
path = vendor/nim-leopard
|
||||
url = https://github.com/status-im/nim-leopard.git
|
||||
[submodule "vendor/nim-libp2p-dht"]
|
||||
path = vendor/nim-libp2p-dht
|
||||
url = https://github.com/status-im/nim-libp2p-dht.git
|
||||
[submodule "vendor/nim-codex-dht"]
|
||||
path = vendor/nim-codex-dht
|
||||
url = https://github.com/codex-storage/nim-codex-dht.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-datastore"]
|
||||
|
@ -182,3 +178,40 @@
|
|||
[submodule "vendor/nim-eth"]
|
||||
path = vendor/nim-eth
|
||||
url = https://github.com/status-im/nim-eth
|
||||
[submodule "vendor/codex-contracts-eth"]
|
||||
path = vendor/codex-contracts-eth
|
||||
url = https://github.com/status-im/codex-contracts-eth
|
||||
[submodule "vendor/nim-protobuf-serialization"]
|
||||
path = vendor/nim-protobuf-serialization
|
||||
url = https://github.com/status-im/nim-protobuf-serialization
|
||||
[submodule "vendor/nim-results"]
|
||||
path = vendor/nim-results
|
||||
url = https://github.com/arnetheduck/nim-results
|
||||
[submodule "vendor/nim-testutils"]
|
||||
path = vendor/nim-testutils
|
||||
url = https://github.com/status-im/nim-testutils
|
||||
[submodule "vendor/npeg"]
|
||||
path = vendor/npeg
|
||||
url = https://github.com/zevv/npeg
|
||||
[submodule "vendor/nim-poseidon2"]
|
||||
path = vendor/nim-poseidon2
|
||||
url = https://github.com/codex-storage/nim-poseidon2.git
|
||||
[submodule "vendor/constantine"]
|
||||
path = vendor/constantine
|
||||
url = https://github.com/mratsim/constantine.git
|
||||
[submodule "vendor/nim-circom-compat"]
|
||||
path = vendor/nim-circom-compat
|
||||
url = https://github.com/codex-storage/nim-circom-compat.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/codex-storage-proofs-circuits"]
|
||||
path = vendor/codex-storage-proofs-circuits
|
||||
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-serde"]
|
||||
path = vendor/nim-serde
|
||||
url = https://github.com/codex-storage/nim-serde.git
|
||||
[submodule "vendor/nim-leveldbstatic"]
|
||||
path = vendor/nim-leveldbstatic
|
||||
url = https://github.com/codex-storage/nim-leveldb.git
|
||||
|
|
135
BUILDING.md
135
BUILDING.md
|
@ -18,45 +18,57 @@
|
|||
|
||||
To build nim-codex, developer tools need to be installed and accessible in the OS.
|
||||
|
||||
Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/status-im/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work.
|
||||
Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/codex-storage/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work.
|
||||
|
||||
Other approaches may be viable. On macOS, some users may prefer [MacPorts](https://www.macports.org/) to [Homebrew](https://brew.sh/). On Windows, rather than use MSYS2, some users may prefer to install developer tools with [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/), [Scoop](https://scoop.sh/), or [Chocolatey](https://chocolatey.org/), or download installers for e.g. Make and CMake while otherwise relying on official Windows developer tools. Community contributions to these docs and our build system are welcome!
|
||||
|
||||
### Rust
|
||||
|
||||
The current implementation of Codex's zero-knowledge proving circuit requires the installation of rust v1.76.0 or greater. Be sure to install it for your OS and add it to your terminal's path such that the command `cargo --version` gives a compatible version.
|
||||
|
||||
### Linux
|
||||
|
||||
*Package manager commands may require `sudo` depending on OS setup.*
|
||||
|
||||
On a bare bones installation of Debian (or a distribution derived from Debian, such as Ubuntu), run
|
||||
|
||||
```text
|
||||
$ apt-get update && apt-get install build-essential cmake curl git
|
||||
```shell
|
||||
apt-get update && apt-get install build-essential cmake curl git rustc cargo
|
||||
```
|
||||
|
||||
Non-Debian distributions have different package managers: `apk`, `dnf`, `pacman`, `rpm`, `yum`, etc.
|
||||
|
||||
For example, on a bare bones installation of Fedora, run
|
||||
|
||||
```text
|
||||
$ dnf install @development-tools cmake gcc-c++ which
|
||||
```shell
|
||||
dnf install @development-tools cmake gcc-c++ rust cargo
|
||||
```
|
||||
|
||||
In case your distribution does not provide required Rust version, we may install it using [rustup](https://www.rust-lang.org/tools/install)
|
||||
```shell
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=1.76.0 -y
|
||||
|
||||
. "$HOME/.cargo/env"
|
||||
```
|
||||
|
||||
### macOS
|
||||
|
||||
Install the [Xcode Command Line Tools](https://mac.install.guide/commandlinetools/index.html) by opening a terminal and running
|
||||
```text
|
||||
$ xcode-select --install
|
||||
```shell
|
||||
xcode-select --install
|
||||
```
|
||||
|
||||
Install [Homebrew (`brew`)](https://brew.sh/) and in a new terminal run
|
||||
```text
|
||||
$ brew install bash cmake
|
||||
```shell
|
||||
brew install bash cmake rust
|
||||
```
|
||||
|
||||
Check that `PATH` is setup correctly
|
||||
```text
|
||||
$ which bash cmake
|
||||
/usr/local/bin/bash
|
||||
/usr/local/bin/cmake
|
||||
```shell
|
||||
which bash cmake
|
||||
|
||||
# /usr/local/bin/bash
|
||||
# /usr/local/bin/cmake
|
||||
```
|
||||
|
||||
### Windows + MSYS2
|
||||
|
@ -68,14 +80,40 @@ Download and run the installer from [msys2.org](https://www.msys2.org/).
|
|||
Launch an MSYS2 [environment](https://www.msys2.org/docs/environments/). UCRT64 is generally recommended: from the Windows *Start menu* select `MSYS2 MinGW UCRT x64`.
|
||||
|
||||
Assuming a UCRT64 environment, in Bash run
|
||||
```text
|
||||
$ pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake
|
||||
```shell
|
||||
pacman -Suy
|
||||
pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-rust
|
||||
```
|
||||
|
||||
<!-- #### Headless Windows container -->
|
||||
<!-- add instructions re: getting setup with MSYS2 in a Windows container -->
|
||||
<!-- https://github.com/StefanScherer/windows-docker-machine -->
|
||||
|
||||
#### Optional: VSCode Terminal integration
|
||||
|
||||
You can link the MSYS2-UCRT64 terminal into VSCode by modifying the configuration file as shown below.
|
||||
File: `C:/Users/<username>/AppData/Roaming/Code/User/settings.json`
|
||||
```json
|
||||
{
|
||||
...
|
||||
"terminal.integrated.profiles.windows": {
|
||||
...
|
||||
"MSYS2-UCRT64": {
|
||||
"path": "C:\\msys64\\usr\\bin\\bash.exe",
|
||||
"args": [
|
||||
"--login",
|
||||
"-i"
|
||||
],
|
||||
"env": {
|
||||
"MSYSTEM": "UCRT64",
|
||||
"CHERE_INVOKING": "1",
|
||||
"MSYS2_PATH_TYPE": "inherit"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Other
|
||||
|
||||
It is possible that nim-codex can be built and run on other platforms supported by the [Nim](https://nim-lang.org/) language: BSD family, older versions of Windows, etc. There has not been sufficient experimentation with nim-codex on such platforms, so instructions are not provided. Community contributions to these docs and our build system are welcome!
|
||||
|
@ -83,30 +121,30 @@ It is possible that nim-codex can be built and run on other platforms supported
|
|||
## Repository
|
||||
|
||||
In Bash run
|
||||
```text
|
||||
$ git clone https://github.com/status-im/nim-codex.git repos/nim-codex && cd repos/nim-codex
|
||||
```shell
|
||||
git clone https://github.com/codex-storage/nim-codex.git repos/nim-codex && cd repos/nim-codex
|
||||
```
|
||||
|
||||
nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system#readme), so next run
|
||||
```text
|
||||
$ make update
|
||||
nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system), so next run
|
||||
```shell
|
||||
make update
|
||||
```
|
||||
|
||||
This step can take a while to complete because by default it builds the [Nim compiler](https://nim-lang.org/docs/nimc.html).
|
||||
|
||||
To see more output from `make` pass `V=1`. This works for all `make` targets in projects using the nimbus-build-system
|
||||
```text
|
||||
$ make V=1 update
|
||||
```shell
|
||||
make V=1 update
|
||||
```
|
||||
|
||||
## Executable
|
||||
|
||||
In Bash run
|
||||
```text
|
||||
$ make exec
|
||||
```shell
|
||||
make
|
||||
```
|
||||
|
||||
The `exec` target creates the `build/codex` executable.
|
||||
The default `make` target creates the `build/codex` executable.
|
||||
|
||||
## Example usage
|
||||
|
||||
|
@ -115,29 +153,40 @@ See the [instructions](README.md#cli-options) in the main readme.
|
|||
## Tests
|
||||
|
||||
In Bash run
|
||||
```text
|
||||
$ make test
|
||||
```shell
|
||||
make test
|
||||
```
|
||||
|
||||
### testAll
|
||||
|
||||
#### Prerequisites
|
||||
|
||||
To run the integration tests, an Ethereum test node is required. Follow these instructions to set it up.
|
||||
|
||||
##### Windows (do this before 'All platforms')
|
||||
|
||||
1. Download and install Visual Studio 2017 or newer. (Not VSCode!) In the Workloads overview, enable `Desktop development with C++`. ( https://visualstudio.microsoft.com )
|
||||
|
||||
##### All platforms
|
||||
|
||||
1. Install NodeJS (tested with v18.14.0), consider using NVM as a version manager. [Node Version Manager (`nvm`)](https://github.com/nvm-sh/nvm#readme)
|
||||
1. Open a terminal
|
||||
1. Go to the vendor/codex-contracts-eth folder: `cd /<git-root>/vendor/codex-contracts-eth/`
|
||||
1. `npm install` -> Should complete with the number of packages added and an overview of known vulnerabilities.
|
||||
1. `npm test` -> Should output test results. May take a minute.
|
||||
|
||||
Before the integration tests are started, you must start the Ethereum test node manually.
|
||||
1. Open a terminal
|
||||
1. Go to the vendor/codex-contracts-eth folder: `cd /<git-root>/vendor/codex-contracts-eth/`
|
||||
1. `npm start` -> This should launch Hardhat, and output a number of keys and a warning message.
|
||||
|
||||
#### Run
|
||||
|
||||
The `testAll` target runs the same tests as `make test` and also runs tests for nim-codex's Ethereum contracts, as well a basic suite of integration tests.
|
||||
|
||||
To run `make testAll`, Node.js needs to be installed. [Node Version Manager (`nvm`)](https://github.com/nvm-sh/nvm#readme) is a flexible means to do that and it works on Linux, macOS, and Windows + MSYS2.
|
||||
To run `make testAll`.
|
||||
|
||||
With `nvm` installed, launch a separate terminal and download the latest LTS version of Node.js
|
||||
```text
|
||||
$ nvm install --lts
|
||||
```
|
||||
|
||||
In that same terminal run
|
||||
```text
|
||||
$ cd repos/nim-codex/vendor/dagger-contracts && npm install && npm start
|
||||
```
|
||||
|
||||
Those commands install and launch a [Hardhat](https://hardhat.org/) environment with nim-codex's Ethereum contracts.
|
||||
|
||||
In the other terminal run
|
||||
```text
|
||||
$ make testAll
|
||||
Use a new terminal to run:
|
||||
```shell
|
||||
make testAll
|
||||
```
|
||||
|
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2024 Codex Storage
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,199 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,19 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
62
Makefile
62
Makefile
|
@ -5,6 +5,30 @@
|
|||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
# This is the Nim version used locally and in regular CI builds.
|
||||
# Can be a specific version tag, a branch name, or a commit hash.
|
||||
# Can be overridden by setting the NIM_COMMIT environment variable
|
||||
# before calling make.
|
||||
#
|
||||
# For readability in CI, if NIM_COMMIT is set to "pinned",
|
||||
# this will also default to the version pinned here.
|
||||
#
|
||||
# If NIM_COMMIT is set to "nimbusbuild", this will use the
|
||||
# version pinned by nimbus-build-system.
|
||||
PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
|
||||
|
||||
ifeq ($(NIM_COMMIT),)
|
||||
NIM_COMMIT := $(PINNED_NIM_VERSION)
|
||||
else ifeq ($(NIM_COMMIT),pinned)
|
||||
NIM_COMMIT := $(PINNED_NIM_VERSION)
|
||||
endif
|
||||
|
||||
ifeq ($(NIM_COMMIT),nimbusbuild)
|
||||
undefine NIM_COMMIT
|
||||
else
|
||||
export NIM_COMMIT
|
||||
endif
|
||||
|
||||
SHELL := bash # the shell used internally by Make
|
||||
|
||||
# used inside the included makefiles
|
||||
|
@ -44,7 +68,11 @@ GIT_SUBMODULE_UPDATE := git submodule update --init --recursive
|
|||
else # "variables.mk" was included. Business as usual until the end of this file.
|
||||
|
||||
# default target, because it's the first one that doesn't start with '.'
|
||||
all: | test
|
||||
|
||||
# Builds the codex binary
|
||||
all: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
|
||||
|
||||
# must be included after the default target
|
||||
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
|
||||
|
@ -56,15 +84,12 @@ else
|
|||
NIM_PARAMS := $(NIM_PARAMS) -d:release
|
||||
endif
|
||||
|
||||
deps: | deps-common nat-libs codex.nims
|
||||
deps: | deps-common nat-libs
|
||||
ifneq ($(USE_LIBBACKTRACE), 0)
|
||||
deps: | libbacktrace
|
||||
endif
|
||||
|
||||
#- deletes and recreates "codex.nims" which on Windows is a copy instead of a proper symlink
|
||||
update: | update-common
|
||||
rm -rf codex.nims && \
|
||||
$(MAKE) codex.nims $(HANDLE_OUTPUT)
|
||||
|
||||
# detecting the os
|
||||
ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10...
|
||||
|
@ -79,31 +104,27 @@ endif
|
|||
# Builds and run a part of the test suite
|
||||
test: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim test $(NIM_PARAMS) codex.nims
|
||||
$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims
|
||||
|
||||
# Builds and runs the smart contract tests
|
||||
testContracts: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) codex.nims
|
||||
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims
|
||||
|
||||
# Builds and runs the integration tests
|
||||
testIntegration: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) codex.nims
|
||||
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims
|
||||
|
||||
# Builds and runs all tests
|
||||
# Builds and runs all tests (except for Taiko L2 tests)
|
||||
testAll: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) codex.nims
|
||||
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims
|
||||
|
||||
# Builds the codex binary
|
||||
exec: | build deps
|
||||
# Builds and runs Taiko L2 tests
|
||||
testTaiko: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) codex.nims
|
||||
|
||||
# symlink
|
||||
codex.nims:
|
||||
ln -s codex.nimble $@
|
||||
$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) codex.nims
|
||||
|
||||
# nim-libbacktrace
|
||||
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
|
||||
|
@ -128,8 +149,15 @@ coverage:
|
|||
shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
|
||||
echo -e $(BUILD_MSG) "coverage/report/index.html"
|
||||
genhtml coverage/coverage.f.info --output-directory coverage/report
|
||||
|
||||
show-coverage:
|
||||
if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
|
||||
|
||||
coverage-script: build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim coverage $(NIM_PARAMS) build.nims
|
||||
echo "Run `make show-coverage` to view coverage results"
|
||||
|
||||
# usual cleaning
|
||||
clean: | clean-common
|
||||
rm -rf build
|
||||
|
|
178
README.md
178
README.md
|
@ -7,9 +7,11 @@
|
|||
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
|
||||
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
|
||||
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
|
||||
[![CI](https://github.com/status-im/nim-codex/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/status-im/nim-codex/actions?query=workflow%3ACI+branch%3Amain)
|
||||
[![Codecov](https://codecov.io/gh/status-im/nim-codex/branch/main/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/status-im/nim-codex)
|
||||
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
|
||||
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
|
||||
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
|
||||
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
|
||||
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
|
||||
|
||||
|
||||
## Build and Run
|
||||
|
@ -19,7 +21,7 @@ For detailed instructions on preparing to build nim-codex see [*Building Codex*]
|
|||
To build the project, clone it and run:
|
||||
|
||||
```bash
|
||||
make update && make exec
|
||||
make update && make
|
||||
```
|
||||
|
||||
The executable will be placed under the `build` directory under the project root.
|
||||
|
@ -29,6 +31,35 @@ Run the client with:
|
|||
```bash
|
||||
build/codex
|
||||
```
|
||||
## Configuration
|
||||
|
||||
It is possible to configure a Codex node in several ways:
|
||||
1. CLI options
|
||||
2. Env. variable
|
||||
3. Config
|
||||
|
||||
The order of priority is the same as above: Cli arguments > Env variables > Config file values.
|
||||
|
||||
### Environment variables
|
||||
|
||||
In order to set a configuration option using environment variables, first find the desired CLI option
|
||||
and then transform it in the following way:
|
||||
|
||||
1. prepend it with `CODEX_`
|
||||
2. make it uppercase
|
||||
3. replace `-` with `_`
|
||||
|
||||
For example, to configure `--log-level`, use `CODEX_LOG_LEVEL` as the environment variable name.
|
||||
|
||||
### Configuration file
|
||||
|
||||
A [TOML](https://toml.io/en/) configuration file can also be used to set configuration values. Configuration option names and corresponding values are placed in the file, separated by `=`. Configuration option names can be obtained from the `codex --help` command, and should not include the `--` prefix. For example, a node's log level (`--log-level`) can be configured using TOML as follows:
|
||||
|
||||
```toml
|
||||
log-level = "trace"
|
||||
```
|
||||
|
||||
The Codex node can then read the configuration from this file using the `--config-file` CLI parameter, like `codex --config-file=/path/to/your/config.toml`.
|
||||
|
||||
### CLI Options
|
||||
|
||||
|
@ -40,104 +71,77 @@ codex [OPTIONS]... command
|
|||
|
||||
The following options are available:
|
||||
|
||||
--log-level Sets the log level [=LogLevel.INFO].
|
||||
--config-file Loads the configuration from a TOML file [=none].
|
||||
--log-level Sets the log level [=info].
|
||||
--metrics Enable the metrics server [=false].
|
||||
--metrics-address Listening address of the metrics server [=127.0.0.1].
|
||||
--metrics-port Listening HTTP port of the metrics server [=8008].
|
||||
-d, --data-dir The directory where codex will store configuration and data..
|
||||
-l, --listen-port Specifies one or more listening ports for the node to listen on. [=0].
|
||||
-i, --listen-ip The public IP [=0.0.0.0].
|
||||
--udp-port Specify the discovery (UDP) port [=8090].
|
||||
--net-privkey Source of network (secp256k1) private key file (random|<path>) [=random].
|
||||
-b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting to the network..
|
||||
-d, --data-dir The directory where codex will store configuration and data.
|
||||
-i, --listen-addrs Multi Addresses to listen on [=/ip4/0.0.0.0/tcp/0].
|
||||
-a, --nat IP Addresses to announce behind a NAT [=127.0.0.1].
|
||||
-e, --disc-ip Discovery listen address [=0.0.0.0].
|
||||
-u, --disc-port Discovery (UDP) port [=8090].
|
||||
--net-privkey Source of network (secp256k1) private key file path or name [=key].
|
||||
-b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting to the network.
|
||||
--max-peers The maximum number of peers to connect to [=160].
|
||||
--agent-string Node agent string which is used as identifier in network [=Codex].
|
||||
--api-bindaddr The REST API bind address [=127.0.0.1].
|
||||
-p, --api-port The REST Api port [=8080].
|
||||
-c, --cache-size The size in MiB of the block cache, 0 disables the cache [=100].
|
||||
--persistence Enables persistence mechanism, requires an Ethereum node [=false].
|
||||
--eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545].
|
||||
--eth-account The Ethereum account that is used for storage contracts [=EthAddress.none].
|
||||
--eth-deployment The json file describing the contract deployment [=string.none].
|
||||
--repo-kind Backend for main repo store (fs, sqlite) [=fs].
|
||||
-q, --storage-quota The size of the total storage quota dedicated to the node [=8589934592].
|
||||
-t, --block-ttl Default block timeout in seconds - 0 disables the ttl [=$DefaultBlockTtl].
|
||||
--block-mi Time interval in seconds - determines frequency of block maintenance cycle: how
|
||||
often blocks are checked for expiration and cleanup
|
||||
[=$DefaultBlockMaintenanceInterval].
|
||||
--block-mn Number of blocks to check every maintenance cycle [=1000].
|
||||
-c, --cache-size The size of the block cache, 0 disables the cache - might help on slow hardrives
|
||||
[=0].
|
||||
|
||||
Available sub-commands:
|
||||
|
||||
codex initNode
|
||||
codex persistence [OPTIONS]... command
|
||||
|
||||
The following options are available:
|
||||
|
||||
--eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545].
|
||||
--eth-account The Ethereum account that is used for storage contracts.
|
||||
--eth-private-key File containing Ethereum private key for storage contracts.
|
||||
--marketplace-address Address of deployed Marketplace contract.
|
||||
--validator Enables validator, requires an Ethereum node [=false].
|
||||
--validator-max-slots Maximum number of slots that the validator monitors [=1000].
|
||||
|
||||
Available sub-commands:
|
||||
|
||||
codex persistence prover [OPTIONS]...
|
||||
|
||||
The following options are available:
|
||||
|
||||
--circom-r1cs The r1cs file for the storage circuit.
|
||||
--circom-wasm The wasm file for the storage circuit.
|
||||
--circom-zkey The zkey file for the storage circuit.
|
||||
--circom-no-zkey Ignore the zkey file - use only for testing! [=false].
|
||||
--proof-samples Number of samples to prove [=5].
|
||||
--max-slot-depth The maximum depth of the slot tree [=32].
|
||||
--max-dataset-depth The maximum depth of the dataset tree [=8].
|
||||
--max-block-depth The maximum depth of the network block merkle tree [=5].
|
||||
--max-cell-elements The maximum number of elements in a cell [=67].
|
||||
```
|
||||
|
||||
### Example: running two Codex clients
|
||||
#### Logging
|
||||
|
||||
```bash
|
||||
build/codex --data-dir="$(pwd)/Codex1" -i=127.0.0.1
|
||||
```
|
||||
Codex uses [Chronicles](https://github.com/status-im/nim-chronicles) logging library, which allows great flexibility in working with logs.
|
||||
Chronicles has the concept of topics, which categorize log entries into semantic groups.
|
||||
|
||||
This will start codex with a data directory pointing to `Codex1` under the current execution directory and announce itself on the DHT under `127.0.0.1`.
|
||||
Using the `log-level` parameter, you can set the top-level log level like `--log-level="trace"`, but more importantly,
|
||||
you can set log levels for specific topics like `--log-level="info; trace: marketplace,node; error: blockexchange"`,
|
||||
which sets the top-level log level to `info` and then for topics `marketplace` and `node` sets the level to `trace` and so on.
|
||||
|
||||
To run a second client that automatically discovers nodes on the network, we need to get the Signed Peer Record (SPR) of first client, Client1. We can do this by querying the `/info` endpoint of the node's REST API.
|
||||
### Guides
|
||||
|
||||
`curl http://127.0.0.1:8080/api/codex/v1/info`
|
||||
To get acquainted with Codex, consider:
|
||||
* running the simple [Codex Two-Client Test](docs/TwoClientTest.md) for a start, and;
|
||||
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](docs/Marketplace.md) using a local blockchain as well.
|
||||
|
||||
This should output information about Client1, including its PeerID, TCP/UDP addresses, data directory, and SPR:
|
||||
## API
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "16Uiu2HAm92LGXYTuhtLaZzkFnsCx6FFJsNmswK6o9oPXFbSKHQEa",
|
||||
"addrs": [
|
||||
"/ip4/0.0.0.0/udp/8090",
|
||||
"/ip4/0.0.0.0/tcp/49336"
|
||||
],
|
||||
"repo": "/repos/status-im/nim-codex/Codex1",
|
||||
"spr": "spr:CiUIAhIhAmqg5fVU2yxPStLdUOWgwrkWZMHW2MHf6i6l8IjA4tssEgIDARpICicAJQgCEiECaqDl9VTbLE9K0t1Q5aDCuRZkwdbYwd_qLqXwiMDi2ywQ5v2VlAYaCwoJBH8AAAGRAh-aGgoKCAR_AAABBts3KkcwRQIhAPOKl38CviplVbMVnA_9q3N1K_nk5oGuNp7DWeOqiJzzAiATQ2acPyQvPxLU9YS-TiVo4RUXndRcwMFMX2Yjhw8k3A"
|
||||
}
|
||||
```
|
||||
|
||||
Now, let's start a second client, Client2. Because we're already using the default ports TCP (:8080) and UDP (:8090) for the first client, we have to specify new ports to avoid a collision. Additionally, we can specify the SPR from Client1 as the bootstrap node for discovery purposes, allowing Client2 to determine where content is located in the network.
|
||||
|
||||
```bash
|
||||
build/codex --data-dir="$(pwd)/Codex2" -i=127.0.0.1 --api-port=8081 --udp-port=8091 --bootstrap-node=spr:CiUIAhIhAmqg5fVU2yxPStLdUOWgwrkWZMHW2MHf6i6l8IjA4tssEgIDARpICicAJQgCEiECaqDl9VTbLE9K0t1Q5aDCuRZkwdbYwd_qLqXwiMDi2ywQ5v2VlAYaCwoJBH8AAAGRAh-aGgoKCAR_AAABBts3KkcwRQIhAPOKl38CviplVbMVnA_9q3N1K_nk5oGuNp7DWeOqiJzzAiATQ2acPyQvPxLU9YS-TiVo4RUXndRcwMFMX2Yjhw8k3A
|
||||
```
|
||||
|
||||
There are now two clients running. We could upload a file to Client1 and download that file (given its CID) using Client2, by using the clients' REST API.
|
||||
|
||||
## Interacting with the client
|
||||
|
||||
The client exposes a REST API that can be used to interact with the clients. These commands could be invoked with any HTTP client, however the following endpoints assume the use of the `curl` command.
|
||||
|
||||
### `/api/codex/v1/connect/{peerId}`
|
||||
|
||||
Connect to a peer identified by its peer id. Takes an optional `addrs` parameter with a list of valid [multiaddresses](https://multiformats.io/multiaddr/). If `addrs` is absent, the peer will be discovered over the DHT.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl "127.0.0.1:8080/api/codex/v1/connect/<peer id>?addrs=<multiaddress>"
|
||||
```
|
||||
|
||||
### `/api/codex/v1/download/{id}`
|
||||
|
||||
Download data identified by a `Cid`.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl -vvv "127.0.0.1:8080/api/codex/v1/download/<Cid of the content>" --output <name of output file>
|
||||
```
|
||||
|
||||
### `/api/codex/v1/upload`
|
||||
|
||||
Upload a file, upon success returns the `Cid` of the uploaded file.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl -vvv -H "content-type: application/octet-stream" -H Expect: -T "<path to file>" "127.0.0.1:8080/api/codex/v1/upload" -X POST
|
||||
```
|
||||
|
||||
### `/api/codex/v1/info`
|
||||
|
||||
Get useful node info such as its peer id, address and SPR.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl -vvv "127.0.0.1:8080/api/codex/v1/info"
|
||||
```
|
||||
The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage).
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
ceremony
|
||||
circuit_bench_*
|
|
@ -0,0 +1,33 @@
|
|||
|
||||
## Benchmark Runner
|
||||
|
||||
Modify `runAllBenchmarks` proc in `run_benchmarks.nim` to the desired parameters and variations.
|
||||
|
||||
Then run it:
|
||||
|
||||
```sh
|
||||
nim c -r run_benchmarks
|
||||
```
|
||||
|
||||
By default all circuit files for each combinations of circuit args will be generated in a unique folder named like:
|
||||
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
|
||||
|
||||
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
|
||||
|
||||
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suite your needs. See `create_circuits.nim` for their definition.
|
||||
|
||||
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
|
||||
|
||||
## Codex Ark Circom CLI
|
||||
|
||||
Runs Codex's prover setup with Ark / Circom.
|
||||
|
||||
Compile:
|
||||
```sh
|
||||
nim c codex_ark_prover_cli.nim
|
||||
```
|
||||
|
||||
Run to see usage:
|
||||
```sh
|
||||
./codex_ark_prover_cli.nim -h
|
||||
```
|
|
@ -0,0 +1,15 @@
|
|||
--path:
|
||||
".."
|
||||
--path:
|
||||
"../tests"
|
||||
--threads:
|
||||
on
|
||||
--tlsEmulation:
|
||||
off
|
||||
--d:
|
||||
release
|
||||
|
||||
# when not defined(chronicles_log_level):
|
||||
# --define:"chronicles_log_level:NONE" # compile all log statements
|
||||
# --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime
|
||||
# --"import":"logging" # ensure that logging is ignored at runtime
|
|
@ -0,0 +1,187 @@
|
|||
import std/[hashes, json, strutils, strformat, os, osproc, uri]
|
||||
|
||||
import ./utils
|
||||
|
||||
type
|
||||
CircuitEnv* = object
|
||||
nimCircuitCli*: string
|
||||
circuitDirIncludes*: string
|
||||
ptauPath*: string
|
||||
ptauUrl*: Uri
|
||||
codexProjDir*: string
|
||||
|
||||
CircuitArgs* = object
|
||||
depth*: int
|
||||
maxslots*: int
|
||||
cellsize*: int
|
||||
blocksize*: int
|
||||
nsamples*: int
|
||||
entropy*: int
|
||||
seed*: int
|
||||
nslots*: int
|
||||
ncells*: int
|
||||
index*: int
|
||||
|
||||
proc findCodexProjectDir(): string =
|
||||
## find codex proj dir -- assumes this script is in codex/benchmarks
|
||||
result = currentSourcePath().parentDir.parentDir
|
||||
|
||||
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
|
||||
let codexDir = findCodexProjectDir()
|
||||
result.nimCircuitCli =
|
||||
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
|
||||
"proof_input" / "cli"
|
||||
result.circuitDirIncludes =
|
||||
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
|
||||
result.ptauPath =
|
||||
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
|
||||
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
|
||||
result.codexProjDir = codexDir
|
||||
|
||||
proc check*(env: var CircuitEnv) =
|
||||
## check that the CWD of script is in the codex parent
|
||||
let codexProjDir = findCodexProjectDir()
|
||||
echo "\n\nFound project dir: ", codexProjDir
|
||||
|
||||
let snarkjs = findExe("snarkjs")
|
||||
if snarkjs == "":
|
||||
echo dedent"""
|
||||
ERROR: must install snarkjs first
|
||||
|
||||
npm install -g snarkjs@latest
|
||||
"""
|
||||
|
||||
let circom = findExe("circom")
|
||||
if circom == "":
|
||||
echo dedent"""
|
||||
ERROR: must install circom first
|
||||
|
||||
git clone https://github.com/iden3/circom.git
|
||||
cargo install --path circom
|
||||
"""
|
||||
|
||||
if snarkjs == "" or circom == "":
|
||||
quit 2
|
||||
|
||||
echo "Found SnarkJS: ", snarkjs
|
||||
echo "Found Circom: ", circom
|
||||
|
||||
if not env.nimCircuitCli.fileExists:
|
||||
echo "Nim Circuit reference cli not found: ", env.nimCircuitCli
|
||||
echo "Building Circuit reference cli...\n"
|
||||
withDir env.nimCircuitCli.parentDir:
|
||||
runit "nimble build -d:release --styleCheck:off cli"
|
||||
echo "CWD: ", getCurrentDir()
|
||||
assert env.nimCircuitCli.fileExists()
|
||||
|
||||
echo "Found NimCircuitCli: ", env.nimCircuitCli
|
||||
echo "Found Circuit Path: ", env.circuitDirIncludes
|
||||
echo "Found PTAU file: ", env.ptauPath
|
||||
|
||||
proc downloadPtau*(ptauPath: string, ptauUrl: Uri) =
|
||||
## download ptau file using curl if needed
|
||||
if not ptauPath.fileExists:
|
||||
echo "Ceremony file not found, downloading..."
|
||||
createDir ptauPath.parentDir
|
||||
withDir ptauPath.parentDir:
|
||||
runit fmt"curl --output '{ptauPath}' '{$ptauUrl}/{ptauPath.splitPath().tail}'"
|
||||
else:
|
||||
echo "Found PTAU file at: ", ptauPath
|
||||
|
||||
proc getCircuitBenchStr*(args: CircuitArgs): string =
|
||||
for f, v in fieldPairs(args):
|
||||
result &= "_" & f & $v
|
||||
|
||||
proc getCircuitBenchPath*(args: CircuitArgs, env: CircuitEnv): string =
|
||||
## generate folder name for unique circuit args
|
||||
result = env.codexProjDir / "benchmarks/circuit_bench" & getCircuitBenchStr(args)
|
||||
|
||||
proc generateCircomAndSamples*(args: CircuitArgs, env: CircuitEnv, name: string) =
|
||||
## run nim circuit and sample generator
|
||||
var cliCmd = env.nimCircuitCli
|
||||
for f, v in fieldPairs(args):
|
||||
cliCmd &= " --" & f & "=" & $v
|
||||
|
||||
if not "input.json".fileExists:
|
||||
echo "Generating Circom Files..."
|
||||
runit fmt"{cliCmd} -v --circom={name}.circom --output=input.json"
|
||||
|
||||
proc createCircuit*(
|
||||
args: CircuitArgs,
|
||||
env: CircuitEnv,
|
||||
name = "proof_main",
|
||||
circBenchDir = getCircuitBenchPath(args, env),
|
||||
someEntropy = "some_entropy_75289v3b7rcawcsyiur",
|
||||
doGenerateWitness = false,
|
||||
): tuple[dir: string, name: string] =
|
||||
## Generates all the files needed for to run a proof circuit. Downloads the PTAU file if needed.
|
||||
##
|
||||
## All needed circuit files will be generated as needed.
|
||||
## They will be located in `circBenchDir` which defaults to a folder like:
|
||||
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
|
||||
## with all the given CircuitArgs.
|
||||
##
|
||||
let circdir = circBenchDir
|
||||
|
||||
downloadPtau env.ptauPath, env.ptauUrl
|
||||
|
||||
echo "Creating circuit dir: ", circdir
|
||||
createDir circdir
|
||||
withDir circdir:
|
||||
writeFile("circuit_params.json", pretty(%*args))
|
||||
let
|
||||
inputs = circdir / "input.json"
|
||||
zkey = circdir / fmt"{name}.zkey"
|
||||
wasm = circdir / fmt"{name}.wasm"
|
||||
r1cs = circdir / fmt"{name}.r1cs"
|
||||
wtns = circdir / fmt"{name}.wtns"
|
||||
|
||||
generateCircomAndSamples(args, env, name)
|
||||
|
||||
if not wasm.fileExists or not r1cs.fileExists:
|
||||
runit fmt"circom --r1cs --wasm --O2 -l{env.circuitDirIncludes} {name}.circom"
|
||||
moveFile fmt"{name}_js" / fmt"{name}.wasm", fmt"{name}.wasm"
|
||||
echo "Found wasm: ", wasm
|
||||
echo "Found r1cs: ", r1cs
|
||||
|
||||
if not zkey.fileExists:
|
||||
echo "ZKey not found, generating..."
|
||||
putEnv "NODE_OPTIONS", "--max-old-space-size=8192"
|
||||
if not fmt"{name}_0000.zkey".fileExists:
|
||||
runit fmt"snarkjs groth16 setup {r1cs} {env.ptauPath} {name}_0000.zkey"
|
||||
echo fmt"Generated {name}_0000.zkey"
|
||||
|
||||
let cmd =
|
||||
fmt"snarkjs zkey contribute {name}_0000.zkey {name}_0001.zkey --name='1st Contributor Name'"
|
||||
echo "CMD: ", cmd
|
||||
let cmdRes = execCmdEx(cmd, options = {}, input = someEntropy & "\n")
|
||||
assert cmdRes.exitCode == 0
|
||||
|
||||
moveFile fmt"{name}_0001.zkey", fmt"{name}.zkey"
|
||||
removeFile fmt"{name}_0000.zkey"
|
||||
|
||||
if not wtns.fileExists and doGenerateWitness:
|
||||
runit fmt"node generate_witness.js {wtns} ../input.json ../witness.wtns"
|
||||
|
||||
return (circdir, name)
|
||||
|
||||
when isMainModule:
|
||||
echo "findCodexProjectDir: ", findCodexProjectDir()
|
||||
## test run creating a circuit
|
||||
var env = CircuitEnv.default()
|
||||
env.check()
|
||||
|
||||
let args = CircuitArgs(
|
||||
depth: 32, # maximum depth of the slot tree
|
||||
maxslots: 256, # maximum number of slots
|
||||
cellsize: 2048, # cell size in bytes
|
||||
blocksize: 65536, # block size in bytes
|
||||
nsamples: 5, # number of samples to prove
|
||||
entropy: 1234567, # external randomness
|
||||
seed: 12345, # seed for creating fake data
|
||||
nslots: 11, # number of slots in the dataset
|
||||
index: 3, # which slot we prove (0..NSLOTS-1)
|
||||
ncells: 512, # number of cells in this slot
|
||||
)
|
||||
let benchenv = createCircuit(args, env)
|
||||
echo "\nBench dir:\n", benchenv
|
|
@ -0,0 +1,105 @@
|
|||
import std/[sequtils, strformat, os, options, importutils]
|
||||
import std/[times, os, strutils, terminal]
|
||||
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/datastore
|
||||
|
||||
import pkg/codex/[rng, stores, merkletree, codextypes, slots]
|
||||
import pkg/codex/utils/[json, poseidon2digest]
|
||||
import pkg/codex/slots/[builder, sampler/utils, backends/helpers]
|
||||
import pkg/constantine/math/[arithmetic, io/io_bigints, io/io_fields]
|
||||
|
||||
import ./utils
|
||||
import ./create_circuits
|
||||
|
||||
type CircuitFiles* = object
|
||||
r1cs*: string
|
||||
wasm*: string
|
||||
zkey*: string
|
||||
inputs*: string
|
||||
|
||||
proc runArkCircom(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
|
||||
echo "Loading sample proof..."
|
||||
var
|
||||
inputData = files.inputs.readFile()
|
||||
inputJson = !JsonNode.parse(inputData)
|
||||
proofInputs = Poseidon2Hash.jsonToProofInput(inputJson)
|
||||
circom = CircomCompat.init(
|
||||
files.r1cs,
|
||||
files.wasm,
|
||||
files.zkey,
|
||||
slotDepth = args.depth,
|
||||
numSamples = args.nsamples,
|
||||
)
|
||||
defer:
|
||||
circom.release() # this comes from the rust FFI
|
||||
|
||||
echo "Sample proof loaded..."
|
||||
echo "Proving..."
|
||||
|
||||
let nameArgs = getCircuitBenchStr(args)
|
||||
var proof: CircomProof
|
||||
benchmark fmt"prover-{nameArgs}", benchmarkLoops:
|
||||
proof = circom.prove(proofInputs).tryGet
|
||||
|
||||
var verRes: bool
|
||||
benchmark fmt"verify-{nameArgs}", benchmarkLoops:
|
||||
verRes = circom.verify(proof, proofInputs).tryGet
|
||||
echo "verify result: ", verRes
|
||||
|
||||
proc runRapidSnark(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
|
||||
# time rapidsnark ${CIRCUIT_MAIN}.zkey witness.wtns proof.json public.json
|
||||
|
||||
echo "generating the witness..."
|
||||
## TODO
|
||||
|
||||
proc runBenchmark(args: CircuitArgs, env: CircuitEnv, benchmarkLoops: int) =
|
||||
## execute benchmarks given a set of args
|
||||
## will create a folder in `benchmarks/circuit_bench_$(args)`
|
||||
##
|
||||
|
||||
let env = createCircuit(args, env)
|
||||
|
||||
## TODO: copy over testcircomcompat proving
|
||||
let files = CircuitFiles(
|
||||
r1cs: env.dir / fmt"{env.name}.r1cs",
|
||||
wasm: env.dir / fmt"{env.name}.wasm",
|
||||
zkey: env.dir / fmt"{env.name}.zkey",
|
||||
inputs: env.dir / fmt"input.json",
|
||||
)
|
||||
|
||||
runArkCircom(args, files, benchmarkLoops)
|
||||
|
||||
proc runAllBenchmarks*() =
|
||||
echo "Running benchmark"
|
||||
# setup()
|
||||
var env = CircuitEnv.default()
|
||||
env.check()
|
||||
|
||||
var args = CircuitArgs(
|
||||
depth: 32, # maximum depth of the slot tree
|
||||
maxslots: 256, # maximum number of slots
|
||||
cellsize: 2048, # cell size in bytes
|
||||
blocksize: 65536, # block size in bytes
|
||||
nsamples: 1, # number of samples to prove
|
||||
entropy: 1234567, # external randomness
|
||||
seed: 12345, # seed for creating fake data
|
||||
nslots: 11, # number of slots in the dataset
|
||||
index: 3, # which slot we prove (0..NSLOTS-1)
|
||||
ncells: 512, # number of cells in this slot
|
||||
)
|
||||
|
||||
let
|
||||
numberSamples = 3
|
||||
benchmarkLoops = 5
|
||||
|
||||
for i in 1 .. numberSamples:
|
||||
args.nsamples = i
|
||||
stdout.styledWriteLine(fgYellow, "\nbenchmarking args: ", $args)
|
||||
runBenchmark(args, env, benchmarkLoops)
|
||||
|
||||
printBenchMarkSummaries()
|
||||
|
||||
when isMainModule:
|
||||
runAllBenchmarks()
|
|
@ -0,0 +1,76 @@
|
|||
import std/tables
|
||||
|
||||
template withDir*(dir: string, blk: untyped) =
|
||||
## set working dir for duration of blk
|
||||
let prev = getCurrentDir()
|
||||
try:
|
||||
setCurrentDir(dir)
|
||||
`blk`
|
||||
finally:
|
||||
setCurrentDir(prev)
|
||||
|
||||
template runit*(cmd: string) =
|
||||
## run shell commands and verify it runs without an error code
|
||||
echo "RUNNING: ", cmd
|
||||
let cmdRes = execShellCmd(cmd)
|
||||
echo "STATUS: ", cmdRes
|
||||
assert cmdRes == 0
|
||||
|
||||
var benchRuns* = newTable[string, tuple[avgTimeSec: float, count: int]]()
|
||||
|
||||
func avg(vals: openArray[float]): float =
|
||||
for v in vals:
|
||||
result += v / vals.len().toFloat()
|
||||
|
||||
template benchmark*(name: untyped, count: int, blk: untyped) =
|
||||
let benchmarkName: string = name
|
||||
## simple benchmarking of a block of code
|
||||
var runs = newSeqOfCap[float](count)
|
||||
for i in 1 .. count:
|
||||
block:
|
||||
let t0 = epochTime()
|
||||
`blk`
|
||||
let elapsed = epochTime() - t0
|
||||
runs.add elapsed
|
||||
|
||||
var elapsedStr = ""
|
||||
for v in runs:
|
||||
elapsedStr &= ", " & v.formatFloat(format = ffDecimal, precision = 3)
|
||||
stdout.styledWriteLine(
|
||||
fgGreen, "CPU Time [", benchmarkName, "] ", "avg(", $count, "): ", elapsedStr, " s"
|
||||
)
|
||||
benchRuns[benchmarkName] = (runs.avg(), count)
|
||||
|
||||
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
|
||||
if printRegular:
|
||||
echo ""
|
||||
for k, v in benchRuns:
|
||||
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
|
||||
|
||||
if printTsv:
|
||||
echo ""
|
||||
echo "name", "\t", "avgTimeSec", "\t", "count"
|
||||
for k, v in benchRuns:
|
||||
echo k, "\t", v.avgTimeSec, "\t", v.count
|
||||
|
||||
|
||||
import std/math
|
||||
|
||||
func floorLog2*(x: int): int =
|
||||
var k = -1
|
||||
var y = x
|
||||
while (y > 0):
|
||||
k += 1
|
||||
y = y shr 1
|
||||
return k
|
||||
|
||||
func ceilingLog2*(x: int): int =
|
||||
if (x == 0):
|
||||
return -1
|
||||
else:
|
||||
return (floorLog2(x - 1) + 1)
|
||||
|
||||
func checkPowerOfTwo*(x: int, what: string): int =
|
||||
let k = ceilingLog2(x)
|
||||
assert(x == 2 ^ k, ("`" & what & "` is expected to be a power of 2"))
|
||||
return x
|
|
@ -0,0 +1,91 @@
|
|||
mode = ScriptMode.Verbose
|
||||
|
||||
|
||||
### Helper functions
|
||||
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
|
||||
if not dirExists "build":
|
||||
mkDir "build"
|
||||
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
|
||||
var extra_params = params
|
||||
when compiles(commandLineParams):
|
||||
for param in commandLineParams():
|
||||
extra_params &= " " & param
|
||||
else:
|
||||
for i in 2..<paramCount():
|
||||
extra_params &= " " & paramStr(i)
|
||||
|
||||
let cmd = "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
|
||||
exec(cmd)
|
||||
|
||||
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
|
||||
buildBinary name, srcDir, params
|
||||
exec "build/" & name
|
||||
|
||||
task codex, "build codex binary":
|
||||
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
|
||||
|
||||
task testCodex, "Build & run Codex tests":
|
||||
test "testCodex", params = "-d:codex_enable_proof_failures=true"
|
||||
|
||||
task testContracts, "Build & run Codex Contract tests":
|
||||
test "testContracts"
|
||||
|
||||
task testIntegration, "Run integration tests":
|
||||
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
|
||||
test "testIntegration"
|
||||
|
||||
task build, "build codex binary":
|
||||
codexTask()
|
||||
|
||||
task test, "Run tests":
|
||||
testCodexTask()
|
||||
|
||||
task testAll, "Run all tests (except for Taiko L2 tests)":
|
||||
testCodexTask()
|
||||
testContractsTask()
|
||||
testIntegrationTask()
|
||||
|
||||
task testTaiko, "Run Taiko L2 tests":
|
||||
codexTask()
|
||||
test "testTaiko"
|
||||
|
||||
import strutils
|
||||
import os
|
||||
|
||||
task coverage, "generates code coverage report":
|
||||
var (output, exitCode) = gorgeEx("which lcov")
|
||||
if exitCode != 0:
|
||||
echo " ************************** ⛔️ ERROR ⛔️ **************************"
|
||||
echo " ** ERROR: lcov not found, it must be installed to run code **"
|
||||
echo " ** coverage locally **"
|
||||
echo " *****************************************************************"
|
||||
quit 1
|
||||
|
||||
(output, exitCode) = gorgeEx("gcov --version")
|
||||
if output.contains("Apple LLVM"):
|
||||
echo " ************************* ⚠️ WARNING ⚠️ *************************"
|
||||
echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
|
||||
echo " ** emulates an old version of gcov (4.2.0) and therefore **"
|
||||
echo " ** coverage results will differ than those on CI (which **"
|
||||
echo " ** uses a much newer version of gcov). **"
|
||||
echo " *****************************************************************"
|
||||
|
||||
var nimSrcs = " "
|
||||
for f in walkDirRec("codex", {pcFile}):
|
||||
if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
|
||||
|
||||
echo "======== Running Tests ======== "
|
||||
test "coverage", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
|
||||
exec("rm nimcache/coverage/*.c")
|
||||
rmDir("coverage"); mkDir("coverage")
|
||||
echo " ======== Running LCOV ======== "
|
||||
exec("lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info")
|
||||
exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
|
||||
echo " ======== Generating HTML coverage report ======== "
|
||||
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
|
||||
echo " ======== Coverage report Done ======== "
|
||||
|
||||
task showCoverage, "open coverage html":
|
||||
echo " ======== Opening HTML coverage report in browser... ======== "
|
||||
if findExe("open") != "":
|
||||
exec("open coverage/report/index.html")
|
161
codex.nim
161
codex.nim
|
@ -7,18 +7,28 @@
|
|||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
import pkg/questionable
|
||||
import pkg/confutils
|
||||
import pkg/confutils/defs
|
||||
import pkg/confutils/std/net
|
||||
import pkg/confutils/toml/defs as confTomlDefs
|
||||
import pkg/confutils/toml/std/net as confTomlNet
|
||||
import pkg/confutils/toml/std/uri as confTomlUri
|
||||
import pkg/toml_serialization
|
||||
import pkg/libp2p
|
||||
|
||||
import ./codex/conf
|
||||
import ./codex/codex
|
||||
import ./codex/logutils
|
||||
import ./codex/units
|
||||
import ./codex/utils/keyutils
|
||||
import ./codex/codextypes
|
||||
|
||||
export codex, conf, libp2p, chronos, chronicles
|
||||
export codex, conf, libp2p, chronos, logutils
|
||||
|
||||
when isMainModule:
|
||||
import std/sequtils
|
||||
import std/os
|
||||
import pkg/confutils/defs
|
||||
import ./codex/utils/fileutils
|
||||
|
@ -29,72 +39,117 @@ when isMainModule:
|
|||
when defined(posix):
|
||||
import system/ansi_c
|
||||
|
||||
type
|
||||
CodexStatus {.pure.} = enum
|
||||
Stopped,
|
||||
Stopping,
|
||||
Running
|
||||
|
||||
let config = CodexConf.load(
|
||||
version = codexFullVersion
|
||||
version = codexFullVersion,
|
||||
envVarsPrefix = "codex",
|
||||
secondarySources = proc (config: CodexConf, sources: auto) =
|
||||
if configFile =? config.configFile:
|
||||
sources.addConfigFile(Toml, configFile)
|
||||
)
|
||||
config.setupLogging()
|
||||
config.setupMetrics()
|
||||
|
||||
case config.cmd:
|
||||
of StartUpCommand.noCommand:
|
||||
if config.nat == ValidIpAddress.init(IPv4_any()):
|
||||
error "`--nat` cannot be set to the any (`0.0.0.0`) address"
|
||||
quit QuitFailure
|
||||
|
||||
if config.nat == ValidIpAddress.init(IPv4_any()):
|
||||
error "`--nat` cannot be set to the any (`0.0.0.0`) address"
|
||||
if config.nat == ValidIpAddress.init("127.0.0.1"):
|
||||
warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
|
||||
|
||||
if not(checkAndCreateDataDir((config.dataDir).string)):
|
||||
# We are unable to access/create data folder or data folder's
|
||||
# permissions are insecure.
|
||||
quit QuitFailure
|
||||
|
||||
trace "Data dir initialized", dir = $config.dataDir
|
||||
|
||||
if not(checkAndCreateDataDir((config.dataDir / "repo"))):
|
||||
# We are unable to access/create data folder or data folder's
|
||||
# permissions are insecure.
|
||||
quit QuitFailure
|
||||
|
||||
trace "Repo dir initialized", dir = config.dataDir / "repo"
|
||||
|
||||
var
|
||||
state: CodexStatus
|
||||
shutdown: Future[void]
|
||||
|
||||
let
|
||||
keyPath =
|
||||
if isAbsolute(config.netPrivKeyFile):
|
||||
config.netPrivKeyFile
|
||||
else:
|
||||
config.dataDir / config.netPrivKeyFile
|
||||
|
||||
privateKey = setupKey(keyPath).expect("Should setup private key!")
|
||||
server = try:
|
||||
CodexServer.new(config, privateKey)
|
||||
except Exception as exc:
|
||||
error "Failed to start Codex", msg = exc.msg
|
||||
quit QuitFailure
|
||||
|
||||
if config.nat == ValidIpAddress.init("127.0.0.1"):
|
||||
warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
|
||||
## Ctrl+C handling
|
||||
proc doShutdown() =
|
||||
shutdown = server.stop()
|
||||
state = CodexStatus.Stopping
|
||||
|
||||
if not(checkAndCreateDataDir((config.dataDir).string)):
|
||||
# We are unable to access/create data folder or data folder's
|
||||
# permissions are insecure.
|
||||
quit QuitFailure
|
||||
notice "Stopping Codex"
|
||||
|
||||
trace "Data dir initialized", dir = $config.dataDir
|
||||
proc controlCHandler() {.noconv.} =
|
||||
when defined(windows):
|
||||
# workaround for https://github.com/nim-lang/Nim/issues/4057
|
||||
try:
|
||||
setupForeignThreadGc()
|
||||
except Exception as exc: raiseAssert exc.msg # shouldn't happen
|
||||
notice "Shutting down after having received SIGINT"
|
||||
|
||||
if not(checkAndCreateDataDir((config.dataDir / "repo").string)):
|
||||
# We are unable to access/create data folder or data folder's
|
||||
# permissions are insecure.
|
||||
quit QuitFailure
|
||||
doShutdown()
|
||||
|
||||
trace "Repo dir initialized", dir = config.dataDir / "repo"
|
||||
try:
|
||||
setControlCHook(controlCHandler)
|
||||
except Exception as exc: # TODO Exception
|
||||
warn "Cannot set ctrl-c handler", msg = exc.msg
|
||||
|
||||
let
|
||||
keyPath =
|
||||
if isAbsolute(string config.netPrivKeyFile):
|
||||
string config.netPrivKeyFile
|
||||
else:
|
||||
string config.dataDir / string config.netPrivKeyFile
|
||||
# equivalent SIGTERM handler
|
||||
when defined(posix):
|
||||
proc SIGTERMHandler(signal: cint) {.noconv.} =
|
||||
notice "Shutting down after having received SIGTERM"
|
||||
|
||||
privateKey = setupKey(keyPath).expect("Should setup private key!")
|
||||
server = CodexServer.new(config, privateKey)
|
||||
doShutdown()
|
||||
|
||||
## Ctrl+C handling
|
||||
proc controlCHandler() {.noconv.} =
|
||||
when defined(windows):
|
||||
# workaround for https://github.com/nim-lang/Nim/issues/4057
|
||||
try:
|
||||
setupForeignThreadGc()
|
||||
except Exception as exc: raiseAssert exc.msg # shouldn't happen
|
||||
notice "Shutting down after having received SIGINT"
|
||||
waitFor server.stop()
|
||||
|
||||
try:
|
||||
setControlCHook(controlCHandler)
|
||||
except Exception as exc: # TODO Exception
|
||||
warn "Cannot set ctrl-c handler", msg = exc.msg
|
||||
|
||||
# equivalent SIGTERM handler
|
||||
when defined(posix):
|
||||
proc SIGTERMHandler(signal: cint) {.noconv.} =
|
||||
notice "Shutting down after having received SIGTERM"
|
||||
waitFor server.stop()
|
||||
notice "Stopped Codex"
|
||||
|
||||
c_signal(ansi_c.SIGTERM, SIGTERMHandler)
|
||||
c_signal(ansi_c.SIGTERM, SIGTERMHandler)
|
||||
|
||||
try:
|
||||
waitFor server.start()
|
||||
notice "Exited codex"
|
||||
except CatchableError as error:
|
||||
error "Codex failed to start", error = error.msg
|
||||
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
|
||||
# but this would mean we'd have to fix the implementation of all
|
||||
# services so they won't crash if we attempt to stop them before they
|
||||
# had a chance to start (currently you'll get a SISGSEV if you try to).
|
||||
quit QuitFailure
|
||||
|
||||
of StartUpCommand.initNode:
|
||||
discard
|
||||
state = CodexStatus.Running
|
||||
while state == CodexStatus.Running:
|
||||
try:
|
||||
# poll chronos
|
||||
chronos.poll()
|
||||
except Exception as exc:
|
||||
error "Unhandled exception in async proc, aborting", msg = exc.msg
|
||||
quit QuitFailure
|
||||
|
||||
try:
|
||||
# signal handlers guarantee that the shutdown Future will
|
||||
# be assigned before state switches to Stopping
|
||||
waitFor shutdown
|
||||
except CatchableError as error:
|
||||
error "Codex didn't shutdown correctly", error = error.msg
|
||||
quit QuitFailure
|
||||
|
||||
notice "Exited codex"
|
||||
|
|
73
codex.nimble
73
codex.nimble
|
@ -1,78 +1,9 @@
|
|||
mode = ScriptMode.Verbose
|
||||
|
||||
version = "0.1.0"
|
||||
author = "Codex Team"
|
||||
description = "p2p data durability engine"
|
||||
license = "MIT"
|
||||
binDir = "build"
|
||||
srcDir = "."
|
||||
installFiles = @["build.nims"]
|
||||
|
||||
requires "nim >= 1.2.0",
|
||||
"asynctest >= 0.3.2 & < 0.4.0",
|
||||
"bearssl >= 0.1.4",
|
||||
"chronicles >= 0.7.2",
|
||||
"chronos >= 2.5.2",
|
||||
"confutils",
|
||||
"ethers >= 0.2.0 & < 0.3.0",
|
||||
"libbacktrace",
|
||||
"libp2p",
|
||||
"metrics",
|
||||
"nimcrypto >= 0.4.1",
|
||||
"nitro >= 0.5.1 & < 0.6.0",
|
||||
"presto",
|
||||
"protobuf_serialization >= 0.2.0 & < 0.3.0",
|
||||
"questionable >= 0.10.6 & < 0.11.0",
|
||||
"secp256k1",
|
||||
"stew",
|
||||
"upraises >= 0.1.0 & < 0.2.0",
|
||||
"lrucache",
|
||||
"leopard >= 0.1.0 & < 0.2.0",
|
||||
"blscurve",
|
||||
"libp2pdht",
|
||||
"eth"
|
||||
|
||||
when declared(namedBin):
|
||||
namedBin = {
|
||||
"codex/codex": "codex"
|
||||
}.toTable()
|
||||
|
||||
### Helper functions
|
||||
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
|
||||
if not dirExists "build":
|
||||
mkDir "build"
|
||||
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
|
||||
var extra_params = params
|
||||
when compiles(commandLineParams):
|
||||
for param in commandLineParams:
|
||||
extra_params &= " " & param
|
||||
else:
|
||||
for i in 2..<paramCount():
|
||||
extra_params &= " " & paramStr(i)
|
||||
|
||||
|
||||
exec "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
|
||||
|
||||
proc test(name: string, srcDir = "tests/", lang = "c") =
|
||||
buildBinary name, srcDir
|
||||
exec "build/" & name
|
||||
|
||||
task codex, "build codex binary":
|
||||
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
|
||||
|
||||
task testCodex, "Build & run Codex tests":
|
||||
test "testCodex"
|
||||
|
||||
task testContracts, "Build & run Codex Contract tests":
|
||||
test "testContracts"
|
||||
|
||||
task testIntegration, "Run integration tests":
|
||||
codexTask()
|
||||
test "testIntegration"
|
||||
|
||||
task test, "Run tests":
|
||||
testCodexTask()
|
||||
|
||||
task testAll, "Run all tests":
|
||||
testCodexTask()
|
||||
testContractsTask()
|
||||
testIntegrationTask()
|
||||
include "build.nims"
|
||||
|
|
|
@ -10,26 +10,28 @@
|
|||
import std/sequtils
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
import pkg/libp2p/cid
|
||||
import pkg/libp2p/multicodec
|
||||
import pkg/metrics
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
|
||||
import ../protobuf/presence
|
||||
import ./pendingblocks
|
||||
|
||||
import ../protobuf/presence
|
||||
import ../network
|
||||
import ../peers
|
||||
|
||||
import ../../utils
|
||||
import ../../discovery
|
||||
import ../../stores/blockstore
|
||||
|
||||
import ./pendingblocks
|
||||
import ../../logutils
|
||||
import ../../manifest
|
||||
|
||||
logScope:
|
||||
topics = "codex discoveryengine"
|
||||
|
||||
declareGauge(codex_inflight_discovery, "inflight discovery requests")
|
||||
declareGauge(codexInflightDiscovery, "inflight discovery requests")
|
||||
|
||||
const
|
||||
DefaultConcurrentDiscRequests = 10
|
||||
|
@ -37,7 +39,7 @@ const
|
|||
DefaultDiscoveryTimeout = 1.minutes
|
||||
DefaultMinPeersPerBlock = 3
|
||||
DefaultDiscoveryLoopSleep = 3.seconds
|
||||
DefaultAdvertiseLoopSleep = 3.seconds
|
||||
DefaultAdvertiseLoopSleep = 30.minutes
|
||||
|
||||
type
|
||||
DiscoveryEngine* = ref object of RootObj
|
||||
|
@ -60,41 +62,55 @@ type
|
|||
advertiseLoopSleep: Duration # Advertise loop sleep
|
||||
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
|
||||
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
|
||||
advertiseType*: BlockType # Advertice blocks, manifests or both
|
||||
|
||||
proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
|
||||
while b.discEngineRunning:
|
||||
for cid in toSeq(b.pendingBlocks.wantList):
|
||||
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
|
||||
try:
|
||||
await b.discoveryQueue.put(cid)
|
||||
except CancelledError:
|
||||
trace "Discovery loop cancelled"
|
||||
return
|
||||
except CatchableError as exc:
|
||||
trace "Exception in discovery loop", exc = exc.msg
|
||||
warn "Exception in discovery loop", exc = exc.msg
|
||||
|
||||
logScope:
|
||||
sleep = b.discoveryLoopSleep
|
||||
wanted = b.pendingBlocks.len
|
||||
|
||||
trace "About to sleep discovery loop"
|
||||
await sleepAsync(b.discoveryLoopSleep)
|
||||
|
||||
proc advertiseQueueLoop*(b: DiscoveryEngine) {.async.} =
|
||||
proc onBlock(cid: Cid) {.async.} =
|
||||
try:
|
||||
trace "Listed block", cid
|
||||
await b.advertiseQueue.put(cid)
|
||||
await sleepAsync(50.millis) # TODO: temp workaround because we're announcing all CIDs
|
||||
except CancelledError as exc:
|
||||
trace "Cancelling block listing"
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Exception listing blocks", exc = exc.msg
|
||||
proc advertiseBlock(b: DiscoveryEngine, cid: Cid) {.async.} =
|
||||
without isM =? cid.isManifest, err:
|
||||
warn "Unable to determine if cid is manifest"
|
||||
return
|
||||
|
||||
if isM:
|
||||
without blk =? await b.localStore.getBlock(cid), err:
|
||||
error "Error retrieving manifest block", cid, err = err.msg
|
||||
return
|
||||
|
||||
without manifest =? Manifest.decode(blk), err:
|
||||
error "Unable to decode as manifest", err = err.msg
|
||||
return
|
||||
|
||||
# announce manifest cid and tree cid
|
||||
await b.advertiseQueue.put(cid)
|
||||
await b.advertiseQueue.put(manifest.treeCid)
|
||||
|
||||
proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
|
||||
while b.discEngineRunning:
|
||||
discard await b.localStore.listBlocks(onBlock)
|
||||
if cids =? await b.localStore.listBlocks(blockType = b.advertiseType):
|
||||
trace "Begin iterating blocks..."
|
||||
for c in cids:
|
||||
if cid =? await c:
|
||||
await b.advertiseBlock(cid)
|
||||
trace "Iterating blocks finished."
|
||||
|
||||
trace "About to sleep advertise loop", sleep = b.advertiseLoopSleep
|
||||
await sleepAsync(b.advertiseLoopSleep)
|
||||
|
||||
trace "Exiting advertise task loop"
|
||||
info "Exiting advertise task loop"
|
||||
|
||||
proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
|
||||
## Run advertise tasks
|
||||
|
@ -106,7 +122,6 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
|
|||
cid = await b.advertiseQueue.get()
|
||||
|
||||
if cid in b.inFlightAdvReqs:
|
||||
trace "Advertise request already in progress", cid
|
||||
continue
|
||||
|
||||
try:
|
||||
|
@ -114,18 +129,19 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
|
|||
request = b.discovery.provide(cid)
|
||||
|
||||
b.inFlightAdvReqs[cid] = request
|
||||
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
|
||||
trace "Advertising block", cid, inflight = b.inFlightAdvReqs.len
|
||||
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
|
||||
await request
|
||||
|
||||
finally:
|
||||
b.inFlightAdvReqs.del(cid)
|
||||
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
|
||||
trace "Advertised block", cid, inflight = b.inFlightAdvReqs.len
|
||||
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
|
||||
except CancelledError:
|
||||
trace "Advertise task cancelled"
|
||||
return
|
||||
except CatchableError as exc:
|
||||
trace "Exception in advertise task runner", exc = exc.msg
|
||||
warn "Exception in advertise task runner", exc = exc.msg
|
||||
|
||||
trace "Exiting advertise task runner"
|
||||
info "Exiting advertise task runner"
|
||||
|
||||
proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
|
||||
## Run discovery tasks
|
||||
|
@ -143,9 +159,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
|
|||
let
|
||||
haves = b.peers.peersHave(cid)
|
||||
|
||||
trace "Current number of peers for block", cid, count = haves.len
|
||||
if haves.len < b.minPeersPerBlock:
|
||||
trace "Discovering block", cid
|
||||
try:
|
||||
let
|
||||
request = b.discovery
|
||||
|
@ -153,11 +167,10 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
|
|||
.wait(DefaultDiscoveryTimeout)
|
||||
|
||||
b.inFlightDiscReqs[cid] = request
|
||||
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
|
||||
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
|
||||
let
|
||||
peers = await request
|
||||
|
||||
trace "Discovered peers", peers = peers.len
|
||||
let
|
||||
dialed = await allFinished(
|
||||
peers.mapIt( b.network.dialPeer(it.data) ))
|
||||
|
@ -168,29 +181,30 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
|
|||
|
||||
finally:
|
||||
b.inFlightDiscReqs.del(cid)
|
||||
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
|
||||
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
|
||||
except CancelledError:
|
||||
trace "Discovery task cancelled"
|
||||
return
|
||||
except CatchableError as exc:
|
||||
trace "Exception in discovery task runner", exc = exc.msg
|
||||
warn "Exception in discovery task runner", exc = exc.msg
|
||||
|
||||
trace "Exiting discovery task runner"
|
||||
info "Exiting discovery task runner"
|
||||
|
||||
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
|
||||
for cid in cids:
|
||||
if cid notin b.discoveryQueue:
|
||||
try:
|
||||
trace "Queueing find block", cid, queue = b.discoveryQueue.len
|
||||
b.discoveryQueue.putNoWait(cid)
|
||||
except CatchableError as exc:
|
||||
trace "Exception queueing discovery request", exc = exc.msg
|
||||
warn "Exception queueing discovery request", exc = exc.msg
|
||||
|
||||
proc queueProvideBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
|
||||
for cid in cids:
|
||||
if cid notin b.advertiseQueue:
|
||||
try:
|
||||
trace "Queueing provide block", cid, queue = b.discoveryQueue.len
|
||||
b.advertiseQueue.putNoWait(cid)
|
||||
except CatchableError as exc:
|
||||
trace "Exception queueing discovery request", exc = exc.msg
|
||||
warn "Exception queueing discovery request", exc = exc.msg
|
||||
|
||||
proc start*(b: DiscoveryEngine) {.async.} =
|
||||
## Start the discengine task
|
||||
|
@ -222,16 +236,16 @@ proc stop*(b: DiscoveryEngine) {.async.} =
|
|||
return
|
||||
|
||||
b.discEngineRunning = false
|
||||
for t in b.advertiseTasks:
|
||||
if not t.finished:
|
||||
for task in b.advertiseTasks:
|
||||
if not task.finished:
|
||||
trace "Awaiting advertise task to stop"
|
||||
await t.cancelAndWait()
|
||||
await task.cancelAndWait()
|
||||
trace "Advertise task stopped"
|
||||
|
||||
for t in b.discoveryTasks:
|
||||
if not t.finished:
|
||||
for task in b.discoveryTasks:
|
||||
if not task.finished:
|
||||
trace "Awaiting discovery task to stop"
|
||||
await t.cancelAndWait()
|
||||
await task.cancelAndWait()
|
||||
trace "Discovery task stopped"
|
||||
|
||||
if not b.advertiseLoop.isNil and not b.advertiseLoop.finished:
|
||||
|
@ -247,18 +261,22 @@ proc stop*(b: DiscoveryEngine) {.async.} =
|
|||
trace "Discovery engine stopped"
|
||||
|
||||
proc new*(
|
||||
T: type DiscoveryEngine,
|
||||
localStore: BlockStore,
|
||||
peers: PeerCtxStore,
|
||||
network: BlockExcNetwork,
|
||||
discovery: Discovery,
|
||||
pendingBlocks: PendingBlocksManager,
|
||||
concurrentAdvReqs = DefaultConcurrentAdvertRequests,
|
||||
concurrentDiscReqs = DefaultConcurrentDiscRequests,
|
||||
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
|
||||
advertiseLoopSleep = DefaultAdvertiseLoopSleep,
|
||||
minPeersPerBlock = DefaultMinPeersPerBlock,): DiscoveryEngine =
|
||||
T(
|
||||
T: type DiscoveryEngine,
|
||||
localStore: BlockStore,
|
||||
peers: PeerCtxStore,
|
||||
network: BlockExcNetwork,
|
||||
discovery: Discovery,
|
||||
pendingBlocks: PendingBlocksManager,
|
||||
concurrentAdvReqs = DefaultConcurrentAdvertRequests,
|
||||
concurrentDiscReqs = DefaultConcurrentDiscRequests,
|
||||
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
|
||||
advertiseLoopSleep = DefaultAdvertiseLoopSleep,
|
||||
minPeersPerBlock = DefaultMinPeersPerBlock,
|
||||
advertiseType = BlockType.Manifest
|
||||
): DiscoveryEngine =
|
||||
## Create a discovery engine instance for advertising services
|
||||
##
|
||||
DiscoveryEngine(
|
||||
localStore: localStore,
|
||||
peers: peers,
|
||||
network: network,
|
||||
|
@ -272,4 +290,5 @@ proc new*(
|
|||
inFlightAdvReqs: initTable[Cid, Future[void]](),
|
||||
discoveryLoopSleep: discoveryLoopSleep,
|
||||
advertiseLoopSleep: advertiseLoopSleep,
|
||||
minPeersPerBlock: minPeersPerBlock)
|
||||
minPeersPerBlock: minPeersPerBlock,
|
||||
advertiseType: advertiseType)
|
||||
|
|
|
@ -11,15 +11,20 @@ import std/sequtils
|
|||
import std/sets
|
||||
import std/options
|
||||
import std/algorithm
|
||||
import std/sugar
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
import pkg/libp2p/[cid, switch, multihash, multicodec]
|
||||
import pkg/metrics
|
||||
import pkg/stint
|
||||
import pkg/questionable
|
||||
|
||||
import ../../stores/blockstore
|
||||
import ../../blocktype as bt
|
||||
import ../../blocktype
|
||||
import ../../utils
|
||||
import ../../merkletree
|
||||
import ../../logutils
|
||||
import ../../manifest
|
||||
|
||||
import ../protobuf/blockexc
|
||||
import ../protobuf/presence
|
||||
|
@ -36,16 +41,23 @@ export peers, pendingblocks, payments, discovery
|
|||
logScope:
|
||||
topics = "codex blockexcengine"
|
||||
|
||||
declareCounter(codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent")
|
||||
declareCounter(codex_block_exchange_want_have_lists_received, "codex blockexchange wantHave lists received")
|
||||
declareCounter(codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent")
|
||||
declareCounter(codex_block_exchange_want_block_lists_received, "codex blockexchange wantBlock lists received")
|
||||
declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sent")
|
||||
declareCounter(codex_block_exchange_blocks_received, "codex blockexchange blocks received")
|
||||
|
||||
const
|
||||
DefaultMaxPeersPerRequest* = 10
|
||||
DefaultTaskQueueSize = 100
|
||||
DefaultConcurrentTasks = 10
|
||||
DefaultMaxRetries = 3
|
||||
DefaultConcurrentDiscRequests = 10
|
||||
DefaultConcurrentAdvertRequests = 10
|
||||
DefaultDiscoveryTimeout = 1.minutes
|
||||
DefaultMaxQueriedBlocksCache = 1000
|
||||
DefaultMinPeersPerBlock = 3
|
||||
# DefaultMaxRetries = 3
|
||||
# DefaultConcurrentDiscRequests = 10
|
||||
# DefaultConcurrentAdvertRequests = 10
|
||||
# DefaultDiscoveryTimeout = 1.minutes
|
||||
# DefaultMaxQueriedBlocksCache = 1000
|
||||
# DefaultMinPeersPerBlock = 3
|
||||
|
||||
type
|
||||
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
|
||||
|
@ -63,18 +75,13 @@ type
|
|||
peersPerRequest: int # Max number of peers to request from
|
||||
wallet*: WalletRef # Nitro wallet for micropayments
|
||||
pricing*: ?Pricing # Optional bandwidth pricing
|
||||
blockFetchTimeout*: Duration # Timeout for fetching blocks over the network
|
||||
discovery*: DiscoveryEngine
|
||||
|
||||
Pricing* = object
|
||||
address*: EthAddress
|
||||
price*: UInt256
|
||||
|
||||
proc contains*(a: AsyncHeapQueue[Entry], b: Cid): bool =
|
||||
## Convenience method to check for entry prepense
|
||||
##
|
||||
|
||||
a.anyIt( it.cid == b )
|
||||
|
||||
# attach task scheduler to engine
|
||||
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
|
||||
b.taskQueue.pushOrUpdateNoWait(task).isOk()
|
||||
|
@ -108,100 +115,106 @@ proc stop*(b: BlockExcEngine) {.async.} =
|
|||
return
|
||||
|
||||
b.blockexcRunning = false
|
||||
for t in b.blockexcTasks:
|
||||
if not t.finished:
|
||||
for task in b.blockexcTasks:
|
||||
if not task.finished:
|
||||
trace "Awaiting task to stop"
|
||||
await t.cancelAndWait()
|
||||
await task.cancelAndWait()
|
||||
trace "Task stopped"
|
||||
|
||||
trace "NetworkStore stopped"
|
||||
|
||||
proc requestBlock*(
|
||||
proc sendWantHave(
|
||||
b: BlockExcEngine,
|
||||
cid: Cid,
|
||||
timeout = DefaultBlockTimeout): Future[bt.Block] {.async.} =
|
||||
## Request a block from remotes
|
||||
##
|
||||
address: BlockAddress, # pluralize this entire call chain, please
|
||||
excluded: seq[BlockExcPeerCtx],
|
||||
peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
|
||||
trace "Sending wantHave request to peers", address
|
||||
for p in peers:
|
||||
if p notin excluded:
|
||||
if address notin p.peerHave:
|
||||
await b.network.request.sendWantList(
|
||||
p.id,
|
||||
@[address],
|
||||
wantType = WantType.WantHave) # we only want to know if the peer has the block
|
||||
|
||||
trace "Requesting block", cid, peers = b.peers.len
|
||||
|
||||
if b.pendingBlocks.isInFlight(cid):
|
||||
trace "Request handle already pending", cid
|
||||
return await b.pendingBlocks.getWantHandle(cid, timeout)
|
||||
|
||||
let
|
||||
blk = b.pendingBlocks.getWantHandle(cid, timeout)
|
||||
|
||||
var
|
||||
peers = b.peers.selectCheapest(cid)
|
||||
|
||||
if peers.len <= 0:
|
||||
trace "No cheapest peers, selecting first in list", cid
|
||||
peers = toSeq(b.peers) # Get any peer
|
||||
if peers.len <= 0:
|
||||
trace "No peers to request blocks from", cid
|
||||
b.discovery.queueFindBlocksReq(@[cid])
|
||||
return await blk
|
||||
|
||||
let
|
||||
blockPeer = peers[0] # get cheapest
|
||||
|
||||
proc blockHandleMonitor() {.async.} =
|
||||
try:
|
||||
trace "Monigoring block handle", cid
|
||||
b.pendingBlocks.setInFlight(cid, true)
|
||||
discard await blk
|
||||
trace "Block handle success", cid
|
||||
except CatchableError as exc:
|
||||
trace "Error block handle, disconnecting peer", cid, exc = exc.msg
|
||||
|
||||
# TODO: really, this is just a quick and dirty way of
|
||||
# preventing hitting the same "bad" peer every time, however,
|
||||
# we might as well discover this on or next iteration, so
|
||||
# it doesn't mean that we're never talking to this peer again.
|
||||
# TODO: we need a lot more work around peer selection and
|
||||
# prioritization
|
||||
|
||||
# drop unresponsive peer
|
||||
await b.network.switch.disconnect(blockPeer.id)
|
||||
|
||||
trace "Sending block request to peer", peer = blockPeer.id, cid
|
||||
|
||||
# monitor block handle
|
||||
asyncSpawn blockHandleMonitor()
|
||||
|
||||
# request block
|
||||
proc sendWantBlock(
|
||||
b: BlockExcEngine,
|
||||
address: BlockAddress, # pluralize this entire call chain, please
|
||||
blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
|
||||
trace "Sending wantBlock request to", peer = blockPeer.id, address
|
||||
await b.network.request.sendWantList(
|
||||
blockPeer.id,
|
||||
@[cid],
|
||||
@[address],
|
||||
wantType = WantType.WantBlock) # we want this remote to send us a block
|
||||
|
||||
if (peers.len - 1) == 0:
|
||||
trace "No peers to send want list to", cid
|
||||
b.discovery.queueFindBlocksReq(@[cid])
|
||||
return await blk # no peers to send wants to
|
||||
proc monitorBlockHandle(
|
||||
b: BlockExcEngine,
|
||||
handle: Future[Block],
|
||||
address: BlockAddress,
|
||||
peerId: PeerId) {.async.} =
|
||||
|
||||
# filter out the peer we've already requested from
|
||||
let remaining = peers[1..min(peers.high, b.peersPerRequest)]
|
||||
trace "Sending want list to remaining peers", count = remaining.len
|
||||
for p in remaining:
|
||||
if cid notin p.peerHave:
|
||||
# just send wants
|
||||
await b.network.request.sendWantList(
|
||||
p.id,
|
||||
@[cid],
|
||||
wantType = WantType.WantHave) # we only want to know if the peer has the block
|
||||
try:
|
||||
discard await handle
|
||||
except CancelledError as exc:
|
||||
trace "Block handle cancelled", address, peerId
|
||||
except CatchableError as exc:
|
||||
warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId
|
||||
|
||||
return await blk
|
||||
# TODO: really, this is just a quick and dirty way of
|
||||
# preventing hitting the same "bad" peer every time, however,
|
||||
# we might as well discover this on or next iteration, so
|
||||
# it doesn't mean that we're never talking to this peer again.
|
||||
# TODO: we need a lot more work around peer selection and
|
||||
# prioritization
|
||||
|
||||
# drop unresponsive peer
|
||||
await b.network.switch.disconnect(peerId)
|
||||
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
|
||||
|
||||
proc requestBlock*(
|
||||
b: BlockExcEngine,
|
||||
address: BlockAddress,
|
||||
): Future[?!Block] {.async.} =
|
||||
let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout)
|
||||
|
||||
if not b.pendingBlocks.isInFlight(address):
|
||||
let peers = b.peers.selectCheapest(address)
|
||||
if peers.len == 0:
|
||||
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
|
||||
|
||||
let maybePeer =
|
||||
if peers.len > 0:
|
||||
peers[hash(address) mod peers.len].some
|
||||
elif b.peers.len > 0:
|
||||
toSeq(b.peers)[hash(address) mod b.peers.len].some
|
||||
else:
|
||||
BlockExcPeerCtx.none
|
||||
|
||||
if peer =? maybePeer:
|
||||
asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id)
|
||||
b.pendingBlocks.setInFlight(address)
|
||||
await b.sendWantBlock(address, peer)
|
||||
codex_block_exchange_want_block_lists_sent.inc()
|
||||
await b.sendWantHave(address, @[peer], toSeq(b.peers))
|
||||
codex_block_exchange_want_have_lists_sent.inc()
|
||||
|
||||
# Don't let timeouts bubble up. We can't be too broad here or we break
|
||||
# cancellations.
|
||||
try:
|
||||
success await blockFuture
|
||||
except AsyncTimeoutError as err:
|
||||
failure err
|
||||
|
||||
proc requestBlock*(
|
||||
b: BlockExcEngine,
|
||||
cid: Cid
|
||||
): Future[?!Block] =
|
||||
b.requestBlock(BlockAddress.init(cid))
|
||||
|
||||
proc blockPresenceHandler*(
|
||||
b: BlockExcEngine,
|
||||
peer: PeerId,
|
||||
blocks: seq[BlockPresence]) {.async.} =
|
||||
## Handle block presence
|
||||
##
|
||||
|
||||
trace "Received presence update for peer", peer, blocks = blocks.len
|
||||
let
|
||||
peerCtx = b.peers.get(peer)
|
||||
wantList = toSeq(b.pendingBlocks.wantList)
|
||||
|
@ -211,12 +224,6 @@ proc blockPresenceHandler*(
|
|||
|
||||
for blk in blocks:
|
||||
if presence =? Presence.init(blk):
|
||||
logScope:
|
||||
cid = presence.cid
|
||||
have = presence.have
|
||||
price = presence.price
|
||||
|
||||
trace "Updating precense"
|
||||
peerCtx.setPresence(presence)
|
||||
|
||||
let
|
||||
|
@ -226,166 +233,237 @@ proc blockPresenceHandler*(
|
|||
)
|
||||
|
||||
if dontWantCids.len > 0:
|
||||
trace "Cleaning peer haves", peer, count = dontWantCids.len
|
||||
peerCtx.cleanPresence(dontWantCids)
|
||||
|
||||
trace "Peer want/have", items = peerHave.len, wantList = wantList.len
|
||||
let
|
||||
wantCids = wantList.filterIt(
|
||||
it in peerHave
|
||||
)
|
||||
|
||||
if wantCids.len > 0:
|
||||
trace "Getting blocks based on updated precense", peer, count = wantCids.len
|
||||
trace "Peer has blocks in our wantList", peer, wantCount = wantCids.len
|
||||
discard await allFinished(
|
||||
wantCids.mapIt(b.requestBlock(it)))
|
||||
trace "Requested blocks based on updated precense", peer, count = wantCids.len
|
||||
wantCids.mapIt(b.sendWantBlock(it, peerCtx)))
|
||||
|
||||
# if none of the connected peers report our wants in their have list,
|
||||
# fire up discovery
|
||||
b.discovery.queueFindBlocksReq(
|
||||
toSeq(b.pendingBlocks.wantList)
|
||||
toSeq(b.pendingBlocks.wantListCids)
|
||||
.filter do(cid: Cid) -> bool:
|
||||
not b.peers.anyIt( cid in it.peerHave ))
|
||||
|
||||
proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
|
||||
trace "Schedule a task for new blocks", items = blocks.len
|
||||
not b.peers.anyIt( cid in it.peerHaveCids ))
|
||||
|
||||
proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||
let
|
||||
cids = blocks.mapIt( it.cid )
|
||||
cids = blocksDelivery.mapIt( it.blk.cid )
|
||||
|
||||
# schedule any new peers to provide blocks to
|
||||
for p in b.peers:
|
||||
for c in cids: # for each cid
|
||||
# schedule a peer if it wants at least one cid
|
||||
# and we have it in our local store
|
||||
if c in p.peerWants:
|
||||
if c in p.peerWantsCids:
|
||||
if await (c in b.localStore):
|
||||
if b.scheduleTask(p):
|
||||
trace "Task scheduled for peer", peer = p.id
|
||||
else:
|
||||
trace "Unable to schedule task for peer", peer = p.id
|
||||
warn "Unable to schedule task for peer", peer = p.id
|
||||
|
||||
break # do next peer
|
||||
|
||||
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
|
||||
## Resolve pending blocks from the pending blocks manager
|
||||
## and schedule any new task to be ran
|
||||
##
|
||||
proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
|
||||
## Tells neighboring peers that we're no longer interested in a block.
|
||||
trace "Sending block request cancellations to peers", addrs = addrs.len
|
||||
|
||||
trace "Resolving blocks", blocks = blocks.len
|
||||
let failed = (await allFinished(
|
||||
b.peers.mapIt(
|
||||
b.network.request.sendWantCancellations(
|
||||
peer = it.id,
|
||||
addresses = addrs))))
|
||||
.filterIt(it.failed)
|
||||
|
||||
b.pendingBlocks.resolve(blocks)
|
||||
await b.scheduleTasks(blocks)
|
||||
b.discovery.queueProvideBlocksReq(blocks.mapIt( it.cid ))
|
||||
if failed.len > 0:
|
||||
warn "Failed to send block request cancellations to peers", peers = failed.len
|
||||
|
||||
proc getAnnouceCids(blocksDelivery: seq[BlockDelivery]): seq[Cid] =
|
||||
var cids = initHashSet[Cid]()
|
||||
for bd in blocksDelivery:
|
||||
if bd.address.leaf:
|
||||
cids.incl(bd.address.treeCid)
|
||||
else:
|
||||
without isM =? bd.address.cid.isManifest, err:
|
||||
warn "Unable to determine if cid is manifest"
|
||||
continue
|
||||
if isM:
|
||||
cids.incl(bd.address.cid)
|
||||
return cids.toSeq
|
||||
|
||||
proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||
b.pendingBlocks.resolve(blocksDelivery)
|
||||
await b.scheduleTasks(blocksDelivery)
|
||||
let announceCids = getAnnouceCids(blocksDelivery)
|
||||
await b.cancelBlocks(blocksDelivery.mapIt(it.address))
|
||||
|
||||
b.discovery.queueProvideBlocksReq(announceCids)
|
||||
|
||||
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
|
||||
await b.resolveBlocks(
|
||||
blocks.mapIt(
|
||||
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)
|
||||
)))
|
||||
|
||||
proc payForBlocks(engine: BlockExcEngine,
|
||||
peer: BlockExcPeerCtx,
|
||||
blocks: seq[bt.Block]) {.async.} =
|
||||
trace "Paying for blocks", blocks = blocks.len
|
||||
|
||||
blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||
let
|
||||
sendPayment = engine.network.request.sendPayment
|
||||
price = peer.price(blocks.mapIt(it.cid))
|
||||
price = peer.price(blocksDelivery.mapIt(it.address))
|
||||
|
||||
if payment =? engine.wallet.pay(peer, price):
|
||||
trace "Sending payment for blocks", price
|
||||
trace "Sending payment for blocks", price, len = blocksDelivery.len
|
||||
await sendPayment(peer.id, payment)
|
||||
|
||||
proc blocksHandler*(
|
||||
proc validateBlockDelivery(
|
||||
b: BlockExcEngine,
|
||||
bd: BlockDelivery): ?!void =
|
||||
if bd.address notin b.pendingBlocks:
|
||||
return failure("Received block is not currently a pending block")
|
||||
|
||||
if bd.address.leaf:
|
||||
without proof =? bd.proof:
|
||||
return failure("Missing proof")
|
||||
|
||||
if proof.index != bd.address.index:
|
||||
return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index)
|
||||
|
||||
without leaf =? bd.blk.cid.mhash.mapFailure, err:
|
||||
return failure("Unable to get mhash from cid for block, nested err: " & err.msg)
|
||||
|
||||
without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
|
||||
return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
|
||||
|
||||
if err =? proof.verify(leaf, treeRoot).errorOption:
|
||||
return failure("Unable to verify proof for block, nested err: " & err.msg)
|
||||
|
||||
else: # not leaf
|
||||
if bd.address.cid != bd.blk.cid:
|
||||
return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)
|
||||
|
||||
return success()
|
||||
|
||||
proc blocksDeliveryHandler*(
|
||||
b: BlockExcEngine,
|
||||
peer: PeerId,
|
||||
blocks: seq[bt.Block]) {.async.} =
|
||||
## handle incoming blocks
|
||||
##
|
||||
blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt($it.address)).join(",")
|
||||
|
||||
trace "Got blocks from peer", peer, len = blocks.len
|
||||
for blk in blocks:
|
||||
if isErr (await b.localStore.putBlock(blk)):
|
||||
trace "Unable to store block", cid = blk.cid
|
||||
var validatedBlocksDelivery: seq[BlockDelivery]
|
||||
for bd in blocksDelivery:
|
||||
logScope:
|
||||
peer = peer
|
||||
address = bd.address
|
||||
|
||||
if err =? b.validateBlockDelivery(bd).errorOption:
|
||||
warn "Block validation failed", msg = err.msg
|
||||
continue
|
||||
|
||||
if err =? (await b.localStore.putBlock(bd.blk)).errorOption:
|
||||
error "Unable to store block", err = err.msg
|
||||
continue
|
||||
|
||||
if bd.address.leaf:
|
||||
without proof =? bd.proof:
|
||||
error "Proof expected for a leaf block delivery"
|
||||
continue
|
||||
if err =? (await b.localStore.putCidAndProof(
|
||||
bd.address.treeCid,
|
||||
bd.address.index,
|
||||
bd.blk.cid,
|
||||
proof)).errorOption:
|
||||
|
||||
error "Unable to store proof and cid for a block"
|
||||
continue
|
||||
|
||||
validatedBlocksDelivery.add(bd)
|
||||
|
||||
await b.resolveBlocks(validatedBlocksDelivery)
|
||||
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
|
||||
|
||||
await b.resolveBlocks(blocks)
|
||||
let
|
||||
peerCtx = b.peers.get(peer)
|
||||
|
||||
if peerCtx != nil:
|
||||
# we don't care about this blocks anymore, lets cleanup the list
|
||||
await b.payForBlocks(peerCtx, blocks)
|
||||
peerCtx.cleanPresence(blocks.mapIt( it.cid ))
|
||||
await b.payForBlocks(peerCtx, blocksDelivery)
|
||||
## shouldn't we remove them from the want-list instead of this:
|
||||
peerCtx.cleanPresence(blocksDelivery.mapIt( it.address ))
|
||||
|
||||
proc wantListHandler*(
|
||||
b: BlockExcEngine,
|
||||
peer: PeerId,
|
||||
wantList: WantList) {.async.} =
|
||||
## Handle incoming want lists
|
||||
##
|
||||
|
||||
trace "Got want list for peer", peer, items = wantList.entries.len
|
||||
let peerCtx = b.peers.get(peer)
|
||||
let
|
||||
peerCtx = b.peers.get(peer)
|
||||
if isNil(peerCtx):
|
||||
return
|
||||
|
||||
var
|
||||
precense: seq[BlockPresence]
|
||||
presence: seq[BlockPresence]
|
||||
|
||||
for e in wantList.entries:
|
||||
let
|
||||
idx = peerCtx.peerWants.find(e)
|
||||
idx = peerCtx.peerWants.findIt(it.address == e.address)
|
||||
|
||||
logScope:
|
||||
peer = peerCtx.id
|
||||
cid = e.cid
|
||||
address = e.address
|
||||
wantType = $e.wantType
|
||||
|
||||
if idx < 0: # updating entry
|
||||
trace "Processing new want list entry", cid = e.cid
|
||||
|
||||
let
|
||||
have = await e.cid in b.localStore
|
||||
have = await e.address in b.localStore
|
||||
price = @(
|
||||
b.pricing.get(Pricing(price: 0.u256))
|
||||
.price.toBytesBE)
|
||||
|
||||
if e.wantType == WantType.WantHave:
|
||||
codex_block_exchange_want_have_lists_received.inc()
|
||||
|
||||
if not have and e.sendDontHave:
|
||||
trace "Adding dont have entry to precense response", cid = e.cid
|
||||
precense.add(
|
||||
presence.add(
|
||||
BlockPresence(
|
||||
cid: e.cid.data.buffer,
|
||||
address: e.address,
|
||||
`type`: BlockPresenceType.DontHave,
|
||||
price: price))
|
||||
elif have and e.wantType == WantType.WantHave:
|
||||
trace "Adding have entry to precense response", cid = e.cid
|
||||
precense.add(
|
||||
presence.add(
|
||||
BlockPresence(
|
||||
cid: e.cid.data.buffer,
|
||||
address: e.address,
|
||||
`type`: BlockPresenceType.Have,
|
||||
price: price))
|
||||
elif e.wantType == WantType.WantBlock:
|
||||
trace "Added entry to peer's want blocks list", cid = e.cid
|
||||
peerCtx.peerWants.add(e)
|
||||
codex_block_exchange_want_block_lists_received.inc()
|
||||
else:
|
||||
# peer doesn't want this block anymore
|
||||
if e.cancel:
|
||||
trace "Removing entry from peer want list"
|
||||
peerCtx.peerWants.del(idx)
|
||||
else:
|
||||
trace "Updating entry in peer want list"
|
||||
# peer might want to ask for the same cid with
|
||||
# different want params
|
||||
peerCtx.peerWants[idx] = e # update entry
|
||||
|
||||
if precense.len > 0:
|
||||
trace "Sending precense to remote", items = precense.len
|
||||
await b.network.request.sendPresence(peer, precense)
|
||||
if presence.len > 0:
|
||||
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
|
||||
await b.network.request.sendPresence(peer, presence)
|
||||
|
||||
if not b.scheduleTask(peerCtx):
|
||||
trace "Unable to schedule task for peer", peer
|
||||
warn "Unable to schedule task for peer", peer
|
||||
|
||||
proc accountHandler*(
|
||||
engine: BlockExcEngine,
|
||||
peer: PeerId,
|
||||
account: Account) {.async.} =
|
||||
let context = engine.peers.get(peer)
|
||||
let
|
||||
context = engine.peers.get(peer)
|
||||
if context.isNil:
|
||||
return
|
||||
|
||||
|
@ -403,7 +481,8 @@ proc paymentHandler*(
|
|||
return
|
||||
|
||||
if channel =? context.paymentChannel:
|
||||
let sender = account.address
|
||||
let
|
||||
sender = account.address
|
||||
discard engine.wallet.acceptPayment(channel, Asset, sender, payment)
|
||||
else:
|
||||
context.paymentChannel = engine.wallet.acceptChannel(payment).option
|
||||
|
@ -413,6 +492,8 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
|
|||
## list exchange
|
||||
##
|
||||
|
||||
trace "Setting up peer", peer
|
||||
|
||||
if peer notin b.peers:
|
||||
trace "Setting up new peer", peer
|
||||
b.peers.add(BlockExcPeerCtx(
|
||||
|
@ -421,9 +502,11 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
|
|||
trace "Added peer", peers = b.peers.len
|
||||
|
||||
# broadcast our want list, the other peer will do the same
|
||||
if b.pendingBlocks.len > 0:
|
||||
if b.pendingBlocks.wantListLen > 0:
|
||||
trace "Sending our want list to a peer", peer
|
||||
let cids = toSeq(b.pendingBlocks.wantList)
|
||||
await b.network.request.sendWantList(
|
||||
peer, toSeq(b.pendingBlocks.wantList), full = true)
|
||||
peer, cids, full = true)
|
||||
|
||||
if address =? b.pricing.?address:
|
||||
await b.network.request.sendAccount(peer, Account(address: address))
|
||||
|
@ -438,8 +521,6 @@ proc dropPeer*(b: BlockExcEngine, peer: PeerId) =
|
|||
b.peers.remove(peer)
|
||||
|
||||
proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
|
||||
trace "Handling task for peer", peer = task.id
|
||||
|
||||
# Send to the peer blocks he wants to get,
|
||||
# if they present in our local store
|
||||
|
||||
|
@ -448,38 +529,53 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
|
|||
|
||||
var
|
||||
wantsBlocks = task.peerWants.filterIt(
|
||||
it.wantType == WantType.WantBlock
|
||||
it.wantType == WantType.WantBlock and not it.inFlight
|
||||
)
|
||||
|
||||
if wantsBlocks.len > 0:
|
||||
trace "Got peer want blocks list", items = wantsBlocks.len
|
||||
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
|
||||
for peerWant in task.peerWants.mitems:
|
||||
if peerWant.address in addresses:
|
||||
peerWant.inFlight = inFlight
|
||||
|
||||
if wantsBlocks.len > 0:
|
||||
# Mark wants as in-flight.
|
||||
let wantAddresses = wantsBlocks.mapIt(it.address)
|
||||
updateInFlight(wantAddresses, true)
|
||||
wantsBlocks.sort(SortOrder.Descending)
|
||||
|
||||
let
|
||||
blockFuts = await allFinished(wantsBlocks.mapIt(
|
||||
b.localStore.getBlock(it.cid)
|
||||
))
|
||||
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
|
||||
if e.address.leaf:
|
||||
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
|
||||
(blkAndProof: (Block, CodexProof)) =>
|
||||
BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
|
||||
)
|
||||
else:
|
||||
(await b.localStore.getBlock(e.address)).map(
|
||||
(blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
|
||||
)
|
||||
|
||||
# Extract successfully received blocks
|
||||
let
|
||||
blocks = blockFuts
|
||||
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
|
||||
blocksDelivery = blocksDeliveryFut
|
||||
.filterIt(it.completed and it.read.isOk)
|
||||
.mapIt(it.read.get)
|
||||
|
||||
if blocks.len > 0:
|
||||
trace "Sending blocks to peer", peer = task.id, blocks = blocks.len
|
||||
await b.network.request.sendBlocks(
|
||||
task.id,
|
||||
blocks)
|
||||
# All the wants that failed local lookup must be set to not-in-flight again.
|
||||
let
|
||||
successAddresses = blocksDelivery.mapIt(it.address)
|
||||
failedAddresses = wantAddresses.filterIt(it notin successAddresses)
|
||||
updateInFlight(failedAddresses, false)
|
||||
|
||||
trace "About to remove entries from peerWants", blocks = blocks.len, items = task.peerWants.len
|
||||
# Remove successfully sent blocks
|
||||
task.peerWants.keepIf(
|
||||
proc(e: Entry): bool =
|
||||
not blocks.anyIt( it.cid == e.cid )
|
||||
if blocksDelivery.len > 0:
|
||||
trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt($it.address)).join(",")
|
||||
await b.network.request.sendBlocksDelivery(
|
||||
task.id,
|
||||
blocksDelivery
|
||||
)
|
||||
trace "Removed entries from peerWants", items = task.peerWants.len
|
||||
|
||||
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
|
||||
|
||||
task.peerWants.keepItIf(it.address notin successAddresses)
|
||||
|
||||
proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
|
||||
## process tasks
|
||||
|
@ -490,21 +586,24 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
|
|||
let
|
||||
peerCtx = await b.taskQueue.pop()
|
||||
|
||||
trace "Got new task from queue", peerId = peerCtx.id
|
||||
await b.taskHandler(peerCtx)
|
||||
|
||||
trace "Exiting blockexc task runner"
|
||||
info "Exiting blockexc task runner"
|
||||
|
||||
proc new*(
|
||||
T: type BlockExcEngine,
|
||||
localStore: BlockStore,
|
||||
wallet: WalletRef,
|
||||
network: BlockExcNetwork,
|
||||
discovery: DiscoveryEngine,
|
||||
peerStore: PeerCtxStore,
|
||||
pendingBlocks: PendingBlocksManager,
|
||||
concurrentTasks = DefaultConcurrentTasks,
|
||||
peersPerRequest = DefaultMaxPeersPerRequest): T =
|
||||
T: type BlockExcEngine,
|
||||
localStore: BlockStore,
|
||||
wallet: WalletRef,
|
||||
network: BlockExcNetwork,
|
||||
discovery: DiscoveryEngine,
|
||||
peerStore: PeerCtxStore,
|
||||
pendingBlocks: PendingBlocksManager,
|
||||
concurrentTasks = DefaultConcurrentTasks,
|
||||
peersPerRequest = DefaultMaxPeersPerRequest,
|
||||
blockFetchTimeout = DefaultBlockTimeout,
|
||||
): BlockExcEngine =
|
||||
## Create new block exchange engine instance
|
||||
##
|
||||
|
||||
let
|
||||
engine = BlockExcEngine(
|
||||
|
@ -516,7 +615,8 @@ proc new*(
|
|||
wallet: wallet,
|
||||
concurrentTasks: concurrentTasks,
|
||||
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
|
||||
discovery: discovery)
|
||||
discovery: discovery,
|
||||
blockFetchTimeout: blockFetchTimeout)
|
||||
|
||||
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
|
@ -538,10 +638,10 @@ proc new*(
|
|||
presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
|
||||
engine.blockPresenceHandler(peer, presence)
|
||||
|
||||
proc blocksHandler(
|
||||
proc blocksDeliveryHandler(
|
||||
peer: PeerId,
|
||||
blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
|
||||
engine.blocksHandler(peer, blocks)
|
||||
blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
|
||||
engine.blocksDeliveryHandler(peer, blocksDelivery)
|
||||
|
||||
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
|
||||
engine.accountHandler(peer, account)
|
||||
|
@ -551,7 +651,7 @@ proc new*(
|
|||
|
||||
network.handlers = BlockExcHandlers(
|
||||
onWantList: blockWantListHandler,
|
||||
onBlocks: blocksHandler,
|
||||
onBlocksDelivery: blocksDeliveryHandler,
|
||||
onPresence: blockPresenceHandler,
|
||||
onAccount: accountHandler,
|
||||
onPayment: paymentHandler)
|
||||
|
|
|
@ -8,21 +8,26 @@
|
|||
## those terms.
|
||||
|
||||
import std/tables
|
||||
import std/monotimes
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push: {.upraises: [].}
|
||||
|
||||
import pkg/questionable
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
import pkg/libp2p
|
||||
import pkg/metrics
|
||||
|
||||
import ../protobuf/blockexc
|
||||
import ../../blocktype
|
||||
import ../../logutils
|
||||
|
||||
logScope:
|
||||
topics = "codex pendingblocks"
|
||||
|
||||
declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests")
|
||||
declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us")
|
||||
|
||||
const
|
||||
DefaultBlockTimeout* = 10.minutes
|
||||
|
||||
|
@ -30,83 +35,123 @@ type
|
|||
BlockReq* = object
|
||||
handle*: Future[Block]
|
||||
inFlight*: bool
|
||||
startTime*: int64
|
||||
|
||||
PendingBlocksManager* = ref object of RootObj
|
||||
blocks*: Table[Cid, BlockReq] # pending Block requests
|
||||
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
|
||||
|
||||
proc updatePendingBlockGauge(p: PendingBlocksManager) =
|
||||
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
|
||||
|
||||
proc getWantHandle*(
|
||||
p: PendingBlocksManager,
|
||||
cid: Cid,
|
||||
address: BlockAddress,
|
||||
timeout = DefaultBlockTimeout,
|
||||
inFlight = false): Future[Block] {.async.} =
|
||||
## Add an event for a block
|
||||
##
|
||||
|
||||
try:
|
||||
if cid notin p.blocks:
|
||||
p.blocks[cid] = BlockReq(
|
||||
if address notin p.blocks:
|
||||
p.blocks[address] = BlockReq(
|
||||
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
|
||||
inFlight: inFlight)
|
||||
inFlight: inFlight,
|
||||
startTime: getMonoTime().ticks)
|
||||
|
||||
trace "Adding pending future for block", cid, inFlight = p.blocks[cid].inFlight
|
||||
|
||||
return await p.blocks[cid].handle.wait(timeout)
|
||||
p.updatePendingBlockGauge()
|
||||
return await p.blocks[address].handle.wait(timeout)
|
||||
except CancelledError as exc:
|
||||
trace "Blocks cancelled", exc = exc.msg, cid
|
||||
trace "Blocks cancelled", exc = exc.msg, address
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
trace "Pending WANT failed or expired", exc = exc.msg
|
||||
error "Pending WANT failed or expired", exc = exc.msg
|
||||
# no need to cancel, it is already cancelled by wait()
|
||||
raise exc
|
||||
finally:
|
||||
p.blocks.del(cid)
|
||||
p.blocks.del(address)
|
||||
p.updatePendingBlockGauge()
|
||||
|
||||
proc getWantHandle*(
|
||||
p: PendingBlocksManager,
|
||||
cid: Cid,
|
||||
timeout = DefaultBlockTimeout,
|
||||
inFlight = false): Future[Block] =
|
||||
p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
|
||||
|
||||
proc resolve*(
|
||||
p: PendingBlocksManager,
|
||||
blocks: seq[Block]) =
|
||||
blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} =
|
||||
## Resolve pending blocks
|
||||
##
|
||||
|
||||
for blk in blocks:
|
||||
# resolve any pending blocks
|
||||
p.blocks.withValue(blk.cid, pending):
|
||||
if not pending[].handle.completed:
|
||||
trace "Resolving block", cid = blk.cid
|
||||
pending[].handle.complete(blk)
|
||||
for bd in blocksDelivery:
|
||||
p.blocks.withValue(bd.address, blockReq):
|
||||
if not blockReq.handle.finished:
|
||||
let
|
||||
startTime = blockReq.startTime
|
||||
stopTime = getMonoTime().ticks
|
||||
retrievalDurationUs = (stopTime - startTime) div 1000
|
||||
|
||||
blockReq.handle.complete(bd.blk)
|
||||
|
||||
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
|
||||
|
||||
if retrievalDurationUs > 500000:
|
||||
warn "High block retrieval time", retrievalDurationUs, address = bd.address
|
||||
else:
|
||||
trace "Block handle already finished", address = bd.address
|
||||
|
||||
proc setInFlight*(
|
||||
p: PendingBlocksManager,
|
||||
cid: Cid,
|
||||
address: BlockAddress,
|
||||
inFlight = true) =
|
||||
p.blocks.withValue(cid, pending):
|
||||
## Set inflight status for a block
|
||||
##
|
||||
|
||||
p.blocks.withValue(address, pending):
|
||||
pending[].inFlight = inFlight
|
||||
trace "Setting inflight", cid, inFlight = pending[].inFlight
|
||||
|
||||
proc isInFlight*(
|
||||
p: PendingBlocksManager,
|
||||
cid: Cid): bool =
|
||||
p.blocks.withValue(cid, pending):
|
||||
address: BlockAddress): bool =
|
||||
## Check if a block is in flight
|
||||
##
|
||||
|
||||
p.blocks.withValue(address, pending):
|
||||
result = pending[].inFlight
|
||||
trace "Getting inflight", cid, inFlight = result
|
||||
|
||||
proc pending*(
|
||||
p: PendingBlocksManager,
|
||||
cid: Cid): bool = cid in p.blocks
|
||||
proc contains*(p: PendingBlocksManager, cid: Cid): bool =
|
||||
BlockAddress.init(cid) in p.blocks
|
||||
|
||||
proc contains*(
|
||||
p: PendingBlocksManager,
|
||||
cid: Cid): bool = p.pending(cid)
|
||||
proc contains*(p: PendingBlocksManager, address: BlockAddress): bool =
|
||||
address in p.blocks
|
||||
|
||||
iterator wantList*(p: PendingBlocksManager): Cid =
|
||||
for k in p.blocks.keys:
|
||||
yield k
|
||||
iterator wantList*(p: PendingBlocksManager): BlockAddress =
|
||||
for a in p.blocks.keys:
|
||||
yield a
|
||||
|
||||
iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
|
||||
for a in p.blocks.keys:
|
||||
if not a.leaf:
|
||||
yield a.cid
|
||||
|
||||
iterator wantListCids*(p: PendingBlocksManager): Cid =
|
||||
var yieldedCids = initHashSet[Cid]()
|
||||
for a in p.blocks.keys:
|
||||
let cid = a.cidOrTreeCid
|
||||
if cid notin yieldedCids:
|
||||
yieldedCids.incl(cid)
|
||||
yield cid
|
||||
|
||||
iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
|
||||
for v in p.blocks.values:
|
||||
yield v.handle
|
||||
|
||||
proc wantListLen*(p: PendingBlocksManager): int =
|
||||
p.blocks.len
|
||||
|
||||
func len*(p: PendingBlocksManager): int =
|
||||
p.blocks.len
|
||||
|
||||
func new*(T: type PendingBlocksManager): T =
|
||||
T()
|
||||
func new*(T: type PendingBlocksManager): PendingBlocksManager =
|
||||
PendingBlocksManager()
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
import std/tables
|
||||
import std/sequtils
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
|
||||
import pkg/libp2p
|
||||
|
@ -19,6 +18,7 @@ import pkg/questionable
|
|||
import pkg/questionable/results
|
||||
|
||||
import ../../blocktype as bt
|
||||
import ../../logutils
|
||||
import ../protobuf/blockexc as pb
|
||||
import ../protobuf/payments
|
||||
|
||||
|
@ -34,47 +34,61 @@ const
|
|||
MaxInflight* = 100
|
||||
|
||||
type
|
||||
WantListHandler* = proc(peer: PeerID, wantList: WantList): Future[void] {.gcsafe.}
|
||||
BlocksHandler* = proc(peer: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
|
||||
BlockPresenceHandler* = proc(peer: PeerID, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
|
||||
AccountHandler* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.}
|
||||
PaymentHandler* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.}
|
||||
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
|
||||
BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
||||
BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
|
||||
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
||||
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
||||
|
||||
BlockExcHandlers* = object
|
||||
onWantList*: WantListHandler
|
||||
onBlocksDelivery*: BlocksDeliveryHandler
|
||||
onPresence*: BlockPresenceHandler
|
||||
onAccount*: AccountHandler
|
||||
onPayment*: PaymentHandler
|
||||
|
||||
WantListSender* = proc(
|
||||
id: PeerID,
|
||||
cids: seq[Cid],
|
||||
id: PeerId,
|
||||
addresses: seq[BlockAddress],
|
||||
priority: int32 = 0,
|
||||
cancel: bool = false,
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false): Future[void] {.gcsafe.}
|
||||
|
||||
BlockExcHandlers* = object
|
||||
onWantList*: WantListHandler
|
||||
onBlocks*: BlocksHandler
|
||||
onPresence*: BlockPresenceHandler
|
||||
onAccount*: AccountHandler
|
||||
onPayment*: PaymentHandler
|
||||
|
||||
BlocksSender* = proc(peer: PeerID, presence: seq[bt.Block]): Future[void] {.gcsafe.}
|
||||
PresenceSender* = proc(peer: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
|
||||
AccountSender* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.}
|
||||
PaymentSender* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.}
|
||||
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
|
||||
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
||||
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
|
||||
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
||||
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
||||
|
||||
BlockExcRequest* = object
|
||||
sendWantList*: WantListSender
|
||||
sendBlocks*: BlocksSender
|
||||
sendWantCancellations*: WantCancellationSender
|
||||
sendBlocksDelivery*: BlocksDeliverySender
|
||||
sendPresence*: PresenceSender
|
||||
sendAccount*: AccountSender
|
||||
sendPayment*: PaymentSender
|
||||
|
||||
BlockExcNetwork* = ref object of LPProtocol
|
||||
peers*: Table[PeerID, NetworkPeer]
|
||||
peers*: Table[PeerId, NetworkPeer]
|
||||
switch*: Switch
|
||||
handlers*: BlockExcHandlers
|
||||
request*: BlockExcRequest
|
||||
getConn: ConnProvider
|
||||
inflightSema: AsyncSemaphore
|
||||
|
||||
proc peerId*(b: BlockExcNetwork): PeerId =
|
||||
## Return peer id
|
||||
##
|
||||
|
||||
return b.switch.peerInfo.peerId
|
||||
|
||||
proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
|
||||
## Check if peer is self
|
||||
##
|
||||
|
||||
return b.peerId == peer
|
||||
|
||||
proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
|
||||
## Send message to peer
|
||||
##
|
||||
|
@ -82,8 +96,11 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
|
|||
b.peers.withValue(id, peer):
|
||||
try:
|
||||
await b.inflightSema.acquire()
|
||||
trace "Sending message to peer", peer = id
|
||||
await peer[].send(msg)
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as err:
|
||||
error "Error sending message", peer = id, msg = err.msg
|
||||
finally:
|
||||
b.inflightSema.release()
|
||||
do:
|
||||
|
@ -97,31 +114,12 @@ proc handleWantList(
|
|||
##
|
||||
|
||||
if not b.handlers.onWantList.isNil:
|
||||
trace "Handling want list for peer", peer = peer.id, items = list.entries.len
|
||||
await b.handlers.onWantList(peer.id, list)
|
||||
|
||||
# TODO: make into a template
|
||||
proc makeWantList*(
|
||||
cids: seq[Cid],
|
||||
priority: int = 0,
|
||||
cancel: bool = false,
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false): WantList =
|
||||
WantList(
|
||||
entries: cids.mapIt(
|
||||
Entry(
|
||||
`block`: it.data.buffer,
|
||||
priority: priority.int32,
|
||||
cancel: cancel,
|
||||
wantType: wantType,
|
||||
sendDontHave: sendDontHave) ),
|
||||
full: full)
|
||||
|
||||
proc sendWantList*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerID,
|
||||
cids: seq[Cid],
|
||||
id: PeerId,
|
||||
addresses: seq[BlockAddress],
|
||||
priority: int32 = 0,
|
||||
cancel: bool = false,
|
||||
wantType: WantType = WantType.WantHave,
|
||||
|
@ -130,57 +128,45 @@ proc sendWantList*(
|
|||
## Send a want message to peer
|
||||
##
|
||||
|
||||
trace "Sending want list to peer", peer = id, `type` = $wantType, items = cids.len
|
||||
let msg = makeWantList(
|
||||
cids,
|
||||
priority,
|
||||
cancel,
|
||||
wantType,
|
||||
full,
|
||||
sendDontHave)
|
||||
let msg = WantList(
|
||||
entries: addresses.mapIt(
|
||||
WantListEntry(
|
||||
address: it,
|
||||
priority: priority,
|
||||
cancel: cancel,
|
||||
wantType: wantType,
|
||||
sendDontHave: sendDontHave) ),
|
||||
full: full)
|
||||
|
||||
b.send(id, Message(wantlist: msg))
|
||||
|
||||
proc handleBlocks(
|
||||
proc sendWantCancellations*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerId,
|
||||
addresses: seq[BlockAddress]): Future[void] {.async.} =
|
||||
## Informs a remote peer that we're no longer interested in a set of blocks
|
||||
##
|
||||
await b.sendWantList(id = id, addresses = addresses, cancel = true)
|
||||
|
||||
proc handleBlocksDelivery(
|
||||
b: BlockExcNetwork,
|
||||
peer: NetworkPeer,
|
||||
blocks: seq[pb.Block]) {.async.} =
|
||||
blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||
## Handle incoming blocks
|
||||
##
|
||||
|
||||
if not b.handlers.onBlocks.isNil:
|
||||
trace "Handling blocks for peer", peer = peer.id, items = blocks.len
|
||||
if not b.handlers.onBlocksDelivery.isNil:
|
||||
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
|
||||
|
||||
var blks: seq[bt.Block]
|
||||
for blob in blocks:
|
||||
without cid =? Cid.init(blob.prefix):
|
||||
trace "Unable to initialize Cid from protobuf message"
|
||||
|
||||
without blk =? bt.Block.new(cid, blob.data, verify = true):
|
||||
trace "Unable to initialize Block from data"
|
||||
|
||||
blks.add(blk)
|
||||
|
||||
await b.handlers.onBlocks(peer.id, blks)
|
||||
|
||||
template makeBlocks*(blocks: seq[bt.Block]): seq[pb.Block] =
|
||||
var blks: seq[pb.Block]
|
||||
for blk in blocks:
|
||||
blks.add(pb.Block(
|
||||
prefix: blk.cid.data.buffer,
|
||||
data: blk.data
|
||||
))
|
||||
|
||||
blks
|
||||
|
||||
proc sendBlocks*(
|
||||
proc sendBlocksDelivery*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerID,
|
||||
blocks: seq[bt.Block]): Future[void] =
|
||||
id: PeerId,
|
||||
blocksDelivery: seq[BlockDelivery]): Future[void] =
|
||||
## Send blocks to remote
|
||||
##
|
||||
|
||||
b.send(id, pb.Message(payload: makeBlocks(blocks)))
|
||||
b.send(id, pb.Message(payload: blocksDelivery))
|
||||
|
||||
proc handleBlockPresence(
|
||||
b: BlockExcNetwork,
|
||||
|
@ -190,12 +176,11 @@ proc handleBlockPresence(
|
|||
##
|
||||
|
||||
if not b.handlers.onPresence.isNil:
|
||||
trace "Handling block presence for peer", peer = peer.id, items = presence.len
|
||||
await b.handlers.onPresence(peer.id, presence)
|
||||
|
||||
proc sendBlockPresence*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerID,
|
||||
id: PeerId,
|
||||
presence: seq[BlockPresence]): Future[void] =
|
||||
## Send presence to remote
|
||||
##
|
||||
|
@ -240,43 +225,46 @@ proc handlePayment(
|
|||
if not network.handlers.onPayment.isNil:
|
||||
await network.handlers.onPayment(peer.id, payment)
|
||||
|
||||
proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.async.} =
|
||||
try:
|
||||
if msg.wantlist.entries.len > 0:
|
||||
asyncSpawn b.handleWantList(peer, msg.wantlist)
|
||||
proc rpcHandler(
|
||||
b: BlockExcNetwork,
|
||||
peer: NetworkPeer,
|
||||
msg: Message) {.raises: [].} =
|
||||
## handle rpc messages
|
||||
##
|
||||
if msg.wantList.entries.len > 0:
|
||||
asyncSpawn b.handleWantList(peer, msg.wantList)
|
||||
|
||||
if msg.payload.len > 0:
|
||||
asyncSpawn b.handleBlocks(peer, msg.payload)
|
||||
if msg.payload.len > 0:
|
||||
asyncSpawn b.handleBlocksDelivery(peer, msg.payload)
|
||||
|
||||
if msg.blockPresences.len > 0:
|
||||
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
|
||||
if msg.blockPresences.len > 0:
|
||||
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
|
||||
|
||||
if account =? Account.init(msg.account):
|
||||
asyncSpawn b.handleAccount(peer, account)
|
||||
if account =? Account.init(msg.account):
|
||||
asyncSpawn b.handleAccount(peer, account)
|
||||
|
||||
if payment =? SignedState.init(msg.payment):
|
||||
asyncSpawn b.handlePayment(peer, payment)
|
||||
if payment =? SignedState.init(msg.payment):
|
||||
asyncSpawn b.handlePayment(peer, payment)
|
||||
|
||||
except CatchableError as exc:
|
||||
trace "Exception in blockexc rpc handler", exc = exc.msg
|
||||
|
||||
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer =
|
||||
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
|
||||
## Creates or retrieves a BlockExcNetwork Peer
|
||||
##
|
||||
|
||||
if peer in b.peers:
|
||||
return b.peers.getOrDefault(peer, nil)
|
||||
|
||||
var getConn = proc(): Future[Connection] {.async.} =
|
||||
var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
|
||||
try:
|
||||
return await b.switch.dial(peer, Codec)
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as exc:
|
||||
trace "Unable to connect to blockexc peer", exc = exc.msg
|
||||
|
||||
if not isNil(b.getConn):
|
||||
getConn = b.getConn
|
||||
|
||||
let rpcHandler = proc (p: NetworkPeer, msg: Message): Future[void] =
|
||||
let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} =
|
||||
b.rpcHandler(p, msg)
|
||||
|
||||
# create new pubsub peer
|
||||
|
@ -287,7 +275,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer =
|
|||
|
||||
return blockExcPeer
|
||||
|
||||
proc setupPeer*(b: BlockExcNetwork, peer: PeerID) =
|
||||
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
|
||||
## Perform initial setup, such as want
|
||||
## list exchange
|
||||
##
|
||||
|
@ -295,9 +283,16 @@ proc setupPeer*(b: BlockExcNetwork, peer: PeerID) =
|
|||
discard b.getOrCreatePeer(peer)
|
||||
|
||||
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
|
||||
## Dial a peer
|
||||
##
|
||||
|
||||
if b.isSelf(peer.peerId):
|
||||
trace "Skipping dialing self", peer = peer.peerId
|
||||
return
|
||||
|
||||
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
|
||||
|
||||
proc dropPeer*(b: BlockExcNetwork, peer: PeerID) =
|
||||
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
|
||||
## Cleanup disconnected peer
|
||||
##
|
||||
|
||||
|
@ -307,7 +302,7 @@ method init*(b: BlockExcNetwork) =
|
|||
## Perform protocol initialization
|
||||
##
|
||||
|
||||
proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} =
|
||||
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
b.setupPeer(peerId)
|
||||
else:
|
||||
|
@ -328,7 +323,7 @@ proc new*(
|
|||
T: type BlockExcNetwork,
|
||||
switch: Switch,
|
||||
connProvider: ConnProvider = nil,
|
||||
maxInflight = MaxInflight): T =
|
||||
maxInflight = MaxInflight): BlockExcNetwork =
|
||||
## Create a new BlockExcNetwork instance
|
||||
##
|
||||
|
||||
|
@ -339,8 +334,8 @@ proc new*(
|
|||
inflightSema: newAsyncSemaphore(maxInflight))
|
||||
|
||||
proc sendWantList(
|
||||
id: PeerID,
|
||||
cids: seq[Cid],
|
||||
id: PeerId,
|
||||
cids: seq[BlockAddress],
|
||||
priority: int32 = 0,
|
||||
cancel: bool = false,
|
||||
wantType: WantType = WantType.WantHave,
|
||||
|
@ -350,21 +345,25 @@ proc new*(
|
|||
id, cids, priority, cancel,
|
||||
wantType, full, sendDontHave)
|
||||
|
||||
proc sendBlocks(id: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
|
||||
self.sendBlocks(id, blocks)
|
||||
proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} =
|
||||
self.sendWantCancellations(id, addresses)
|
||||
|
||||
proc sendPresence(id: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
|
||||
proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
|
||||
self.sendBlocksDelivery(id, blocksDelivery)
|
||||
|
||||
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
|
||||
self.sendBlockPresence(id, presence)
|
||||
|
||||
proc sendAccount(id: PeerID, account: Account): Future[void] {.gcsafe.} =
|
||||
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
|
||||
self.sendAccount(id, account)
|
||||
|
||||
proc sendPayment(id: PeerID, payment: SignedState): Future[void] {.gcsafe.} =
|
||||
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
|
||||
self.sendPayment(id, payment)
|
||||
|
||||
self.request = BlockExcRequest(
|
||||
sendWantList: sendWantList,
|
||||
sendBlocks: sendBlocks,
|
||||
sendWantCancellations: sendWantCancellations,
|
||||
sendBlocksDelivery: sendBlocksDelivery,
|
||||
sendPresence: sendPresence,
|
||||
sendAccount: sendAccount,
|
||||
sendPayment: sendPayment)
|
||||
|
|
|
@ -11,18 +11,16 @@ import pkg/upraises
|
|||
push: {.upraises: [].}
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
|
||||
import ../protobuf/blockexc
|
||||
import ../protobuf/message
|
||||
import ../../errors
|
||||
import ../../logutils
|
||||
|
||||
logScope:
|
||||
topics = "codex blockexcnetworkpeer"
|
||||
|
||||
const
|
||||
MaxMessageSize = 100 * 1 shl 20 # manifest files can be big
|
||||
|
||||
type
|
||||
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
|
||||
|
||||
|
@ -45,12 +43,13 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
|
|||
try:
|
||||
while not conn.atEof or not conn.closed:
|
||||
let
|
||||
data = await conn.readLp(MaxMessageSize)
|
||||
msg = Message.ProtobufDecode(data).mapFailure().tryGet()
|
||||
trace "Got message for peer", peer = b.id
|
||||
data = await conn.readLp(MaxMessageSize.int)
|
||||
msg = Message.protobufDecode(data).mapFailure().tryGet()
|
||||
await b.handler(b, msg)
|
||||
except CatchableError as exc:
|
||||
trace "Exception in blockexc read loop", exc = exc.msg
|
||||
except CancelledError:
|
||||
trace "Read loop cancelled"
|
||||
except CatchableError as err:
|
||||
warn "Exception in blockexc read loop", msg = err.msg
|
||||
finally:
|
||||
await conn.close()
|
||||
|
||||
|
@ -66,18 +65,17 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} =
|
|||
let conn = await b.connect()
|
||||
|
||||
if isNil(conn):
|
||||
trace "Unable to get send connection for peer message not sent", peer = b.id
|
||||
warn "Unable to get send connection for peer message not sent", peer = b.id
|
||||
return
|
||||
|
||||
trace "Sending message to remote", peer = b.id
|
||||
await conn.writeLp(ProtobufEncode(msg))
|
||||
await conn.writeLp(protobufEncode(msg))
|
||||
|
||||
proc broadcast*(b: NetworkPeer, msg: Message) =
|
||||
proc sendAwaiter() {.async.} =
|
||||
try:
|
||||
await b.send(msg)
|
||||
except CatchableError as exc:
|
||||
trace "Exception broadcasting message to peer", peer = b.id, exc = exc.msg
|
||||
warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg
|
||||
|
||||
asyncSpawn sendAwaiter()
|
||||
|
||||
|
@ -85,7 +83,7 @@ func new*(
|
|||
T: type NetworkPeer,
|
||||
peer: PeerId,
|
||||
connProvider: ConnProvider,
|
||||
rpcHandler: RPCHandler): T =
|
||||
rpcHandler: RPCHandler): NetworkPeer =
|
||||
|
||||
doAssert(not isNil(connProvider),
|
||||
"should supply connection provider")
|
||||
|
|
|
@ -9,8 +9,8 @@
|
|||
|
||||
import std/sequtils
|
||||
import std/tables
|
||||
import std/sets
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
import pkg/chronos
|
||||
import pkg/nitro
|
||||
|
@ -20,42 +20,47 @@ import ../protobuf/blockexc
|
|||
import ../protobuf/payments
|
||||
import ../protobuf/presence
|
||||
|
||||
export payments, nitro
|
||||
import ../../blocktype
|
||||
import ../../logutils
|
||||
|
||||
logScope:
|
||||
topics = "codex peercontext"
|
||||
export payments, nitro
|
||||
|
||||
type
|
||||
BlockExcPeerCtx* = ref object of RootObj
|
||||
id*: PeerID
|
||||
blocks*: Table[Cid, Presence] # remote peer have list including price
|
||||
peerWants*: seq[Entry] # remote peers want lists
|
||||
exchanged*: int # times peer has exchanged with us
|
||||
lastExchange*: Moment # last time peer has exchanged with us
|
||||
account*: ?Account # ethereum account of this peer
|
||||
paymentChannel*: ?ChannelId # payment channel id
|
||||
id*: PeerId
|
||||
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
|
||||
peerWants*: seq[WantListEntry] # remote peers want lists
|
||||
exchanged*: int # times peer has exchanged with us
|
||||
lastExchange*: Moment # last time peer has exchanged with us
|
||||
account*: ?Account # ethereum account of this peer
|
||||
paymentChannel*: ?ChannelId # payment channel id
|
||||
|
||||
proc peerHave*(self: BlockExcPeerCtx): seq[Cid] =
|
||||
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
|
||||
toSeq(self.blocks.keys)
|
||||
|
||||
proc contains*(self: BlockExcPeerCtx, cid: Cid): bool =
|
||||
cid in self.blocks
|
||||
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
|
||||
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
|
||||
|
||||
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
|
||||
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
|
||||
|
||||
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
|
||||
address in self.blocks
|
||||
|
||||
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
|
||||
self.blocks[presence.cid] = presence
|
||||
self.blocks[presence.address] = presence
|
||||
|
||||
func cleanPresence*(self: BlockExcPeerCtx, cids: seq[Cid]) =
|
||||
for cid in cids:
|
||||
self.blocks.del(cid)
|
||||
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
|
||||
for a in addresses:
|
||||
self.blocks.del(a)
|
||||
|
||||
func cleanPresence*(self: BlockExcPeerCtx, cid: Cid) =
|
||||
self.cleanPresence(@[cid])
|
||||
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
|
||||
self.cleanPresence(@[address])
|
||||
|
||||
func price*(self: BlockExcPeerCtx, cids: seq[Cid]): UInt256 =
|
||||
func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
|
||||
var price = 0.u256
|
||||
for cid in cids:
|
||||
self.blocks.withValue(cid, precense):
|
||||
for a in addresses:
|
||||
self.blocks.withValue(a, precense):
|
||||
price += precense[].price
|
||||
|
||||
trace "Blocks price", price
|
||||
price
|
||||
|
|
|
@ -16,10 +16,12 @@ import pkg/upraises
|
|||
push: {.upraises: [].}
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
|
||||
import ../protobuf/blockexc
|
||||
import ../../blocktype
|
||||
import ../../logutils
|
||||
|
||||
|
||||
import ./peercontext
|
||||
export peercontext
|
||||
|
@ -29,56 +31,59 @@ logScope:
|
|||
|
||||
type
|
||||
PeerCtxStore* = ref object of RootObj
|
||||
peers*: OrderedTable[PeerID, BlockExcPeerCtx]
|
||||
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
|
||||
|
||||
iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
|
||||
for p in self.peers.values:
|
||||
yield p
|
||||
|
||||
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerID): bool =
|
||||
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
|
||||
## Convenience method to check for peer precense
|
||||
##
|
||||
|
||||
a.anyIt( it.id == b )
|
||||
|
||||
func contains*(self: PeerCtxStore, peerId: PeerID): bool =
|
||||
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
|
||||
peerId in self.peers
|
||||
|
||||
func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
|
||||
trace "Adding peer to peer context store", peer = peer.id
|
||||
self.peers[peer.id] = peer
|
||||
|
||||
func remove*(self: PeerCtxStore, peerId: PeerID) =
|
||||
trace "Removing peer from peer context store", peer = peerId
|
||||
func remove*(self: PeerCtxStore, peerId: PeerId) =
|
||||
self.peers.del(peerId)
|
||||
|
||||
func get*(self: PeerCtxStore, peerId: PeerID): BlockExcPeerCtx =
|
||||
trace "Retrieving peer from peer context store", peer = peerId
|
||||
func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
|
||||
self.peers.getOrDefault(peerId, nil)
|
||||
|
||||
func len*(self: PeerCtxStore): int =
|
||||
self.peers.len
|
||||
|
||||
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
|
||||
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )
|
||||
|
||||
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
|
||||
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == cid ) )
|
||||
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) )
|
||||
|
||||
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
|
||||
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )
|
||||
|
||||
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
|
||||
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.cid == cid ) )
|
||||
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )
|
||||
|
||||
func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
|
||||
var
|
||||
peers = self.peersHave(cid)
|
||||
func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
|
||||
# assume that the price for all leaves in a tree is the same
|
||||
let rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid)
|
||||
var peers = self.peersHave(rootAddress)
|
||||
|
||||
trace "Selecting cheapest peers", peers = peers.len
|
||||
func cmp(a, b: BlockExcPeerCtx): int =
|
||||
var
|
||||
priceA = 0.u256
|
||||
priceB = 0.u256
|
||||
|
||||
a.blocks.withValue(cid, precense):
|
||||
a.blocks.withValue(rootAddress, precense):
|
||||
priceA = precense[].price
|
||||
|
||||
b.blocks.withValue(cid, precense):
|
||||
b.blocks.withValue(rootAddress, precense):
|
||||
priceB = precense[].price
|
||||
|
||||
if priceA == priceB:
|
||||
|
@ -93,5 +98,5 @@ func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
|
|||
return peers
|
||||
|
||||
proc new*(T: type PeerCtxStore): PeerCtxStore =
|
||||
T(
|
||||
peers: initOrderedTable[PeerID, BlockExcPeerCtx]())
|
||||
## create new instance of a peer context store
|
||||
PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]())
|
||||
|
|
|
@ -9,47 +9,45 @@
|
|||
|
||||
import std/hashes
|
||||
import std/sequtils
|
||||
import pkg/libp2p
|
||||
import pkg/stew/endians2
|
||||
|
||||
import message
|
||||
|
||||
export Message, ProtobufEncode, ProtobufDecode
|
||||
export Wantlist, WantType, Entry
|
||||
export Block, BlockPresenceType, BlockPresence
|
||||
import ../../blocktype
|
||||
|
||||
export Message, protobufEncode, protobufDecode
|
||||
export Wantlist, WantType, WantListEntry
|
||||
export BlockDelivery, BlockPresenceType, BlockPresence
|
||||
export AccountMessage, StateChannelUpdate
|
||||
|
||||
proc hash*(e: Entry): Hash =
|
||||
hash(e.`block`)
|
||||
proc hash*(a: BlockAddress): Hash =
|
||||
if a.leaf:
|
||||
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
|
||||
hash(data)
|
||||
else:
|
||||
hash(a.cid.data.buffer)
|
||||
|
||||
proc cid*(e: Entry): Cid =
|
||||
## Helper to convert raw bytes to Cid
|
||||
##
|
||||
proc hash*(e: WantListEntry): Hash =
|
||||
hash(e.address)
|
||||
|
||||
Cid.init(e.`block`).get()
|
||||
|
||||
proc contains*(a: openArray[Entry], b: Cid): bool =
|
||||
proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
|
||||
## Convenience method to check for peer precense
|
||||
##
|
||||
|
||||
a.filterIt( it.cid == b ).len > 0
|
||||
a.anyIt(it.address == b)
|
||||
|
||||
proc `==`*(a: Entry, cid: Cid): bool =
|
||||
return a.cid == cid
|
||||
proc `==`*(a: WantListEntry, b: BlockAddress): bool =
|
||||
return a.address == b
|
||||
|
||||
proc `<`*(a, b: Entry): bool =
|
||||
proc `<`*(a, b: WantListEntry): bool =
|
||||
a.priority < b.priority
|
||||
|
||||
proc cid*(e: BlockPresence): Cid =
|
||||
## Helper to convert raw bytes to Cid
|
||||
##
|
||||
|
||||
Cid.init(e.cid).get()
|
||||
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
|
||||
return a.address == b
|
||||
|
||||
proc `==`*(a: BlockPresence, cid: Cid): bool =
|
||||
return cid(a) == cid
|
||||
|
||||
proc contains*(a: openArray[BlockPresence], b: Cid): bool =
|
||||
proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
|
||||
## Convenience method to check for peer precense
|
||||
##
|
||||
|
||||
a.filterIt( cid(it) == b ).len > 0
|
||||
a.anyIt(it.address == b)
|
||||
|
|
|
@ -2,36 +2,50 @@
|
|||
# and Protobuf encoder/decoder for these messages.
|
||||
#
|
||||
# Eventually all this code should be auto-generated from message.proto.
|
||||
import std/sugar
|
||||
|
||||
import pkg/libp2p/protobuf/minprotobuf
|
||||
import pkg/libp2p/cid
|
||||
|
||||
import pkg/questionable
|
||||
|
||||
import ../../units
|
||||
|
||||
import ../../merkletree
|
||||
import ../../blocktype
|
||||
|
||||
const
|
||||
MaxBlockSize* = 100.MiBs.uint
|
||||
MaxMessageSize* = 100.MiBs.uint
|
||||
|
||||
type
|
||||
WantType* = enum
|
||||
WantBlock = 0,
|
||||
WantHave = 1
|
||||
|
||||
Entry* = object
|
||||
`block`*: seq[byte] # The block cid
|
||||
WantListEntry* = object
|
||||
address*: BlockAddress
|
||||
priority*: int32 # The priority (normalized). default to 1
|
||||
cancel*: bool # Whether this revokes an entry
|
||||
wantType*: WantType # Note: defaults to enum 0, ie Block
|
||||
sendDontHave*: bool # Note: defaults to false
|
||||
inFlight*: bool # Whether block sending is in progress. Not serialized.
|
||||
|
||||
Wantlist* = object
|
||||
entries*: seq[Entry] # A list of wantlist entries
|
||||
full*: bool # Whether this is the full wantlist. default to false
|
||||
WantList* = object
|
||||
entries*: seq[WantListEntry] # A list of wantList entries
|
||||
full*: bool # Whether this is the full wantList. default to false
|
||||
|
||||
Block* = object
|
||||
prefix*: seq[byte] # CID prefix (cid version, multicodec and multihash prefix (type + length)
|
||||
data*: seq[byte]
|
||||
BlockDelivery* = object
|
||||
blk*: Block
|
||||
address*: BlockAddress
|
||||
proof*: ?CodexProof # Present only if `address.leaf` is true
|
||||
|
||||
BlockPresenceType* = enum
|
||||
Have = 0,
|
||||
DontHave = 1
|
||||
|
||||
BlockPresence* = object
|
||||
cid*: seq[byte] # The block cid
|
||||
address*: BlockAddress
|
||||
`type`*: BlockPresenceType
|
||||
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
|
||||
|
||||
|
@ -42,8 +56,8 @@ type
|
|||
update*: seq[byte] # Signed Nitro state, serialized as JSON
|
||||
|
||||
Message* = object
|
||||
wantlist*: Wantlist
|
||||
payload*: seq[Block]
|
||||
wantList*: WantList
|
||||
payload*: seq[BlockDelivery]
|
||||
blockPresences*: seq[BlockPresence]
|
||||
pendingBytes*: uint
|
||||
account*: AccountMessage
|
||||
|
@ -53,9 +67,20 @@ type
|
|||
# Encoding Message into seq[byte] in Protobuf format
|
||||
#
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.`block`)
|
||||
ipb.write(1, value.leaf.uint)
|
||||
if value.leaf:
|
||||
ipb.write(2, value.treeCid.data.buffer)
|
||||
ipb.write(3, value.index.uint64)
|
||||
else:
|
||||
ipb.write(4, value.cid.data.buffer)
|
||||
ipb.finish()
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.address)
|
||||
ipb.write(2, value.priority.uint64)
|
||||
ipb.write(3, value.cancel.uint)
|
||||
ipb.write(4, value.wantType.uint)
|
||||
|
@ -63,7 +88,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
|
|||
ipb.finish()
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
|
||||
var ipb = initProtoBuffer()
|
||||
for v in value.entries:
|
||||
ipb.write(1, v)
|
||||
|
@ -71,16 +96,20 @@ proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
|
|||
ipb.finish()
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: Block) =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.prefix)
|
||||
ipb.write(2, value.data)
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
|
||||
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
|
||||
ipb.write(1, value.blk.cid.data.buffer)
|
||||
ipb.write(2, value.blk.data)
|
||||
ipb.write(3, value.address)
|
||||
if value.address.leaf:
|
||||
if proof =? value.proof:
|
||||
ipb.write(4, proof.encode())
|
||||
ipb.finish()
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.cid)
|
||||
ipb.write(1, value.address)
|
||||
ipb.write(2, value.`type`.uint)
|
||||
ipb.write(3, value.price)
|
||||
ipb.finish()
|
||||
|
@ -98,9 +127,9 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
|
|||
ipb.finish()
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc ProtobufEncode*(value: Message): seq[byte] =
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.wantlist)
|
||||
proc protobufEncode*(value: Message): seq[byte] =
|
||||
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
|
||||
ipb.write(1, value.wantList)
|
||||
for v in value.payload:
|
||||
ipb.write(3, v)
|
||||
for v in value.blockPresences:
|
||||
|
@ -115,12 +144,40 @@ proc ProtobufEncode*(value: Message): seq[byte] =
|
|||
#
|
||||
# Decoding Message from seq[byte] in Protobuf format
|
||||
#
|
||||
|
||||
proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
|
||||
proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
|
||||
var
|
||||
value = Entry()
|
||||
value: BlockAddress
|
||||
leaf: bool
|
||||
field: uint64
|
||||
discard ? pb.getField(1, value.`block`)
|
||||
cidBuf = newSeq[byte]()
|
||||
|
||||
if ? pb.getField(1, field):
|
||||
leaf = bool(field)
|
||||
|
||||
if leaf:
|
||||
var
|
||||
treeCid: Cid
|
||||
index: Natural
|
||||
if ? pb.getField(2, cidBuf):
|
||||
treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
|
||||
if ? pb.getField(3, field):
|
||||
index = field
|
||||
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
|
||||
else:
|
||||
var cid: Cid
|
||||
if ? pb.getField(4, cidBuf):
|
||||
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
|
||||
value = BlockAddress(leaf: false, cid: cid)
|
||||
|
||||
ok(value)
|
||||
|
||||
proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] =
|
||||
var
|
||||
value = WantListEntry()
|
||||
field: uint64
|
||||
ipb: ProtoBuffer
|
||||
if ? pb.getField(1, ipb):
|
||||
value.address = ? BlockAddress.decode(ipb)
|
||||
if ? pb.getField(2, field):
|
||||
value.priority = int32(field)
|
||||
if ? pb.getField(3, field):
|
||||
|
@ -131,30 +188,52 @@ proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
|
|||
value.sendDontHave = bool(field)
|
||||
ok(value)
|
||||
|
||||
proc decode*(_: type Wantlist, pb: ProtoBuffer): ProtoResult[Wantlist] =
|
||||
proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
|
||||
var
|
||||
value = Wantlist()
|
||||
value = WantList()
|
||||
field: uint64
|
||||
sublist: seq[seq[byte]]
|
||||
if ? pb.getRepeatedField(1, sublist):
|
||||
for item in sublist:
|
||||
value.entries.add(? Entry.decode(initProtoBuffer(item)))
|
||||
value.entries.add(? WantListEntry.decode(initProtoBuffer(item)))
|
||||
if ? pb.getField(2, field):
|
||||
value.full = bool(field)
|
||||
ok(value)
|
||||
|
||||
proc decode*(_: type Block, pb: ProtoBuffer): ProtoResult[Block] =
|
||||
proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
|
||||
var
|
||||
value = Block()
|
||||
discard ? pb.getField(1, value.prefix)
|
||||
discard ? pb.getField(2, value.data)
|
||||
value = BlockDelivery()
|
||||
dataBuf = newSeq[byte]()
|
||||
cidBuf = newSeq[byte]()
|
||||
cid: Cid
|
||||
ipb: ProtoBuffer
|
||||
|
||||
if ? pb.getField(1, cidBuf):
|
||||
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
|
||||
if ? pb.getField(2, dataBuf):
|
||||
value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
|
||||
if ? pb.getField(3, ipb):
|
||||
value.address = ? BlockAddress.decode(ipb)
|
||||
|
||||
if value.address.leaf:
|
||||
var proofBuf = newSeq[byte]()
|
||||
if ? pb.getField(4, proofBuf):
|
||||
let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
|
||||
value.proof = proof.some
|
||||
else:
|
||||
value.proof = CodexProof.none
|
||||
else:
|
||||
value.proof = CodexProof.none
|
||||
|
||||
ok(value)
|
||||
|
||||
proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
|
||||
var
|
||||
value = BlockPresence()
|
||||
field: uint64
|
||||
discard ? pb.getField(1, value.cid)
|
||||
ipb: ProtoBuffer
|
||||
if ? pb.getField(1, ipb):
|
||||
value.address = ? BlockAddress.decode(ipb)
|
||||
if ? pb.getField(2, field):
|
||||
value.`type` = BlockPresenceType(field)
|
||||
discard ? pb.getField(3, value.price)
|
||||
|
@ -172,17 +251,17 @@ proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChan
|
|||
discard ? pb.getField(1, value.update)
|
||||
ok(value)
|
||||
|
||||
proc ProtobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
|
||||
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
|
||||
var
|
||||
value = Message()
|
||||
pb = initProtoBuffer(msg)
|
||||
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
|
||||
ipb: ProtoBuffer
|
||||
sublist: seq[seq[byte]]
|
||||
if ? pb.getField(1, ipb):
|
||||
value.wantlist = ? Wantlist.decode(ipb)
|
||||
value.wantList = ? WantList.decode(ipb)
|
||||
if ? pb.getRepeatedField(3, sublist):
|
||||
for item in sublist:
|
||||
value.payload.add(? Block.decode(initProtoBuffer(item)))
|
||||
value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
|
||||
if ? pb.getRepeatedField(4, sublist):
|
||||
for item in sublist:
|
||||
value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item)))
|
||||
|
|
|
@ -5,6 +5,8 @@ import pkg/questionable/results
|
|||
import pkg/upraises
|
||||
import ./blockexc
|
||||
|
||||
import ../../blocktype
|
||||
|
||||
export questionable
|
||||
export stint
|
||||
export BlockPresenceType
|
||||
|
@ -14,7 +16,7 @@ upraises.push: {.upraises: [].}
|
|||
type
|
||||
PresenceMessage* = blockexc.BlockPresence
|
||||
Presence* = object
|
||||
cid*: Cid
|
||||
address*: BlockAddress
|
||||
have*: bool
|
||||
price*: UInt256
|
||||
|
||||
|
@ -24,19 +26,18 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
|
|||
UInt256.fromBytesBE(bytes).some
|
||||
|
||||
func init*(_: type Presence, message: PresenceMessage): ?Presence =
|
||||
without cid =? Cid.init(message.cid) and
|
||||
price =? UInt256.parse(message.price):
|
||||
without price =? UInt256.parse(message.price):
|
||||
return none Presence
|
||||
|
||||
some Presence(
|
||||
cid: cid,
|
||||
address: message.address,
|
||||
have: message.`type` == BlockPresenceType.Have,
|
||||
price: price
|
||||
)
|
||||
|
||||
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
|
||||
PresenceMessage(
|
||||
cid: presence.cid.data.buffer,
|
||||
address: presence.address,
|
||||
`type`: if presence.have:
|
||||
BlockPresenceType.Have
|
||||
else:
|
||||
|
|
|
@ -8,120 +8,78 @@
|
|||
## those terms.
|
||||
|
||||
import std/tables
|
||||
import std/sugar
|
||||
|
||||
export tables
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push: {.upraises: [].}
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/libp2p/[cid, multicodec, multihash]
|
||||
import pkg/stew/byteutils
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/chronicles
|
||||
|
||||
import ./formats
|
||||
import ./units
|
||||
import ./utils
|
||||
import ./errors
|
||||
import ./logutils
|
||||
import ./utils/json
|
||||
import ./codextypes
|
||||
|
||||
export errors, formats
|
||||
|
||||
const
|
||||
# Size of blocks for storage / network exchange,
|
||||
# should be divisible by 31 for PoR and by 64 for Leopard ECC
|
||||
BlockSize* = 31 * 64 * 33
|
||||
export errors, logutils, units, codextypes
|
||||
|
||||
type
|
||||
Block* = ref object of RootObj
|
||||
cid*: Cid
|
||||
data*: seq[byte]
|
||||
|
||||
BlockNotFoundError* = object of CodexError
|
||||
BlockAddress* = object
|
||||
case leaf*: bool
|
||||
of true:
|
||||
treeCid* {.serialize.}: Cid
|
||||
index* {.serialize.}: Natural
|
||||
else:
|
||||
cid* {.serialize.}: Cid
|
||||
|
||||
template EmptyCid*: untyped =
|
||||
var
|
||||
emptyCid {.global, threadvar.}:
|
||||
array[CIDv0..CIDv1, Table[MultiCodec, Cid]]
|
||||
logutils.formatIt(LogFormat.textLines, BlockAddress):
|
||||
if it.leaf:
|
||||
"treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
|
||||
else:
|
||||
"cid: " & shortLog($it.cid)
|
||||
|
||||
once:
|
||||
emptyCid = [
|
||||
CIDv0: {
|
||||
multiCodec("sha2-256"): Cid
|
||||
.init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
|
||||
.get()
|
||||
}.toTable,
|
||||
CIDv1: {
|
||||
multiCodec("sha2-256"): Cid
|
||||
.init("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")
|
||||
.get()
|
||||
}.toTable,
|
||||
]
|
||||
logutils.formatIt(LogFormat.json, BlockAddress): %it
|
||||
|
||||
emptyCid
|
||||
proc `==`*(a, b: BlockAddress): bool =
|
||||
a.leaf == b.leaf and
|
||||
(
|
||||
if a.leaf:
|
||||
a.treeCid == b.treeCid and a.index == b.index
|
||||
else:
|
||||
a.cid == b.cid
|
||||
)
|
||||
|
||||
template EmptyDigests*: untyped =
|
||||
var
|
||||
emptyDigests {.global, threadvar.}:
|
||||
array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]]
|
||||
proc `$`*(a: BlockAddress): string =
|
||||
if a.leaf:
|
||||
"treeCid: " & $a.treeCid & ", index: " & $a.index
|
||||
else:
|
||||
"cid: " & $a.cid
|
||||
|
||||
once:
|
||||
emptyDigests = [
|
||||
CIDv0: {
|
||||
multiCodec("sha2-256"): EmptyCid[CIDv0]
|
||||
.catch
|
||||
.get()[multiCodec("sha2-256")]
|
||||
.catch
|
||||
.get()
|
||||
.mhash
|
||||
.get()
|
||||
}.toTable,
|
||||
CIDv1: {
|
||||
multiCodec("sha2-256"): EmptyCid[CIDv1]
|
||||
.catch
|
||||
.get()[multiCodec("sha2-256")]
|
||||
.catch
|
||||
.get()
|
||||
.mhash
|
||||
.get()
|
||||
}.toTable,
|
||||
]
|
||||
proc cidOrTreeCid*(a: BlockAddress): Cid =
|
||||
if a.leaf:
|
||||
a.treeCid
|
||||
else:
|
||||
a.cid
|
||||
|
||||
emptyDigests
|
||||
proc address*(b: Block): BlockAddress =
|
||||
BlockAddress(leaf: false, cid: b.cid)
|
||||
|
||||
template EmptyBlock*: untyped =
|
||||
var
|
||||
emptyBlock {.global, threadvar.}:
|
||||
array[CIDv0..CIDv1, Table[MultiCodec, Block]]
|
||||
proc init*(_: type BlockAddress, cid: Cid): BlockAddress =
|
||||
BlockAddress(leaf: false, cid: cid)
|
||||
|
||||
once:
|
||||
emptyBlock = [
|
||||
CIDv0: {
|
||||
multiCodec("sha2-256"): Block(
|
||||
cid: EmptyCid[CIDv0][multiCodec("sha2-256")])
|
||||
}.toTable,
|
||||
CIDv1: {
|
||||
multiCodec("sha2-256"): Block(
|
||||
cid: EmptyCid[CIDv1][multiCodec("sha2-256")])
|
||||
}.toTable,
|
||||
]
|
||||
|
||||
emptyBlock
|
||||
|
||||
proc isEmpty*(cid: Cid): bool =
|
||||
cid == EmptyCid[cid.cidver]
|
||||
.catch
|
||||
.get()[cid.mhash.get().mcodec]
|
||||
.catch
|
||||
.get()
|
||||
|
||||
proc isEmpty*(blk: Block): bool =
|
||||
blk.cid.isEmpty
|
||||
|
||||
proc emptyBlock*(cid: Cid): Block =
|
||||
EmptyBlock[cid.cidver]
|
||||
.catch
|
||||
.get()[cid.mhash.get().mcodec]
|
||||
.catch
|
||||
.get()
|
||||
proc init*(_: type BlockAddress, treeCid: Cid, index: Natural): BlockAddress =
|
||||
BlockAddress(leaf: true, treeCid: treeCid, index: index)
|
||||
|
||||
proc `$`*(b: Block): string =
|
||||
result &= "cid: " & $b.cid
|
||||
|
@ -131,8 +89,10 @@ func new*(
|
|||
T: type Block,
|
||||
data: openArray[byte] = [],
|
||||
version = CIDv1,
|
||||
mcodec = multiCodec("sha2-256"),
|
||||
codec = multiCodec("raw")): ?!T =
|
||||
mcodec = Sha256HashCodec,
|
||||
codec = BlockCodec): ?!Block =
|
||||
## creates a new block for both storage and network IO
|
||||
##
|
||||
|
||||
let
|
||||
hash = ? MultiHash.digest($mcodec, data).mapFailure
|
||||
|
@ -144,21 +104,39 @@ func new*(
|
|||
cid: cid,
|
||||
data: @data).success
|
||||
|
||||
func new*(
|
||||
T: type Block,
|
||||
cid: Cid,
|
||||
data: openArray[byte],
|
||||
verify: bool = true): ?!T =
|
||||
proc new*(
|
||||
T: type Block,
|
||||
cid: Cid,
|
||||
data: openArray[byte],
|
||||
verify: bool = true
|
||||
): ?!Block =
|
||||
## creates a new block for both storage and network IO
|
||||
##
|
||||
|
||||
let
|
||||
mhash = ? cid.mhash.mapFailure
|
||||
b = ? Block.new(
|
||||
data = @data,
|
||||
version = cid.cidver,
|
||||
codec = cid.mcodec,
|
||||
mcodec = mhash.mcodec)
|
||||
if verify:
|
||||
let
|
||||
mhash = ? cid.mhash.mapFailure
|
||||
computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure
|
||||
computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
|
||||
if computedCid != cid:
|
||||
return "Cid doesn't match the data".failure
|
||||
|
||||
if verify and cid != b.cid:
|
||||
return "Cid and content don't match!".failure
|
||||
return Block(
|
||||
cid: cid,
|
||||
data: @data
|
||||
).success
|
||||
|
||||
success b
|
||||
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
|
||||
emptyCid(version, hcodec, BlockCodec)
|
||||
.flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
|
||||
|
||||
proc emptyBlock*(cid: Cid): ?!Block =
|
||||
cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
|
||||
emptyBlock(cid.cidver, mhash.mcodec))
|
||||
|
||||
proc isEmpty*(cid: Cid): bool =
|
||||
success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
|
||||
emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
|
||||
|
||||
proc isEmpty*(blk: Block): bool =
|
||||
blk.cid.isEmpty
|
||||
|
|
|
@ -13,18 +13,18 @@ import pkg/upraises
|
|||
|
||||
push: {.upraises: [].}
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/chronos
|
||||
import pkg/libp2p except shuffle
|
||||
|
||||
import ./blocktype
|
||||
import ./logutils
|
||||
|
||||
export blocktype
|
||||
|
||||
const
|
||||
DefaultChunkSize* = BlockSize
|
||||
DefaultChunkSize* = DefaultBlockSize
|
||||
|
||||
type
|
||||
# default reader type
|
||||
|
@ -35,7 +35,7 @@ type
|
|||
Chunker* = ref object
|
||||
reader*: Reader # Procedure called to actually read the data
|
||||
offset*: int # Bytes read so far (position in the stream)
|
||||
chunkSize*: Natural # Size of each chunk
|
||||
chunkSize*: NBytes # Size of each chunk
|
||||
pad*: bool # Pad last chunk to chunkSize?
|
||||
|
||||
FileChunker* = Chunker
|
||||
|
@ -46,7 +46,7 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
|
|||
## the instantiated chunker
|
||||
##
|
||||
|
||||
var buff = newSeq[byte](c.chunkSize)
|
||||
var buff = newSeq[byte](c.chunkSize.int)
|
||||
let read = await c.reader(cast[ChunkBuffer](addr buff[0]), buff.len)
|
||||
|
||||
if read <= 0:
|
||||
|
@ -59,22 +59,26 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
|
|||
|
||||
return move buff
|
||||
|
||||
func new*(
|
||||
T: type Chunker,
|
||||
reader: Reader,
|
||||
chunkSize = DefaultChunkSize,
|
||||
pad = true): T =
|
||||
|
||||
T(reader: reader,
|
||||
proc new*(
|
||||
T: type Chunker,
|
||||
reader: Reader,
|
||||
chunkSize = DefaultChunkSize,
|
||||
pad = true
|
||||
): Chunker =
|
||||
## create a new Chunker instance
|
||||
##
|
||||
Chunker(
|
||||
reader: reader,
|
||||
offset: 0,
|
||||
chunkSize: chunkSize,
|
||||
pad: pad)
|
||||
|
||||
proc new*(
|
||||
T: type LPStreamChunker,
|
||||
stream: LPStream,
|
||||
chunkSize = DefaultChunkSize,
|
||||
pad = true): T =
|
||||
T: type LPStreamChunker,
|
||||
stream: LPStream,
|
||||
chunkSize = DefaultChunkSize,
|
||||
pad = true
|
||||
): LPStreamChunker =
|
||||
## create the default File chunker
|
||||
##
|
||||
|
||||
|
@ -86,22 +90,25 @@ proc new*(
|
|||
res += await stream.readOnce(addr data[res], len - res)
|
||||
except LPStreamEOFError as exc:
|
||||
trace "LPStreamChunker stream Eof", exc = exc.msg
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as exc:
|
||||
trace "CatchableError exception", exc = exc.msg
|
||||
raise newException(Defect, exc.msg)
|
||||
|
||||
return res
|
||||
|
||||
T.new(
|
||||
LPStreamChunker.new(
|
||||
reader = reader,
|
||||
chunkSize = chunkSize,
|
||||
pad = pad)
|
||||
|
||||
proc new*(
|
||||
T: type FileChunker,
|
||||
file: File,
|
||||
chunkSize = DefaultChunkSize,
|
||||
pad = true): T =
|
||||
T: type FileChunker,
|
||||
file: File,
|
||||
chunkSize = DefaultChunkSize,
|
||||
pad = true
|
||||
): FileChunker =
|
||||
## create the default File chunker
|
||||
##
|
||||
|
||||
|
@ -117,13 +124,15 @@ proc new*(
|
|||
total += res
|
||||
except IOError as exc:
|
||||
trace "Exception reading file", exc = exc.msg
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as exc:
|
||||
trace "CatchableError exception", exc = exc.msg
|
||||
raise newException(Defect, exc.msg)
|
||||
|
||||
return total
|
||||
|
||||
T.new(
|
||||
FileChunker.new(
|
||||
reader = reader,
|
||||
chunkSize = chunkSize,
|
||||
pad = pad)
|
||||
|
|
|
@ -1,16 +1,24 @@
|
|||
import pkg/chronos
|
||||
import pkg/stew/endians2
|
||||
import pkg/upraises
|
||||
import pkg/stint
|
||||
|
||||
type
|
||||
Clock* = ref object of RootObj
|
||||
SecondsSince1970* = int64
|
||||
Timeout* = object of CatchableError
|
||||
|
||||
method now*(clock: Clock): SecondsSince1970 {.base.} =
|
||||
method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} =
|
||||
raiseAssert "not implemented"
|
||||
|
||||
proc waitUntil*(clock: Clock, time: SecondsSince1970) {.async.} =
|
||||
while clock.now() < time:
|
||||
await sleepAsync(1.seconds)
|
||||
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
|
||||
raiseAssert "not implemented"
|
||||
|
||||
method start*(clock: Clock) {.base, async.} =
|
||||
discard
|
||||
|
||||
method stop*(clock: Clock) {.base, async.} =
|
||||
discard
|
||||
|
||||
proc withTimeout*(future: Future[void],
|
||||
clock: Clock,
|
||||
|
@ -23,3 +31,14 @@ proc withTimeout*(future: Future[void],
|
|||
if not future.completed:
|
||||
await future.cancelAndWait()
|
||||
raise newException(Timeout, "Timed out")
|
||||
|
||||
proc toBytes*(i: SecondsSince1970): seq[byte] =
|
||||
let asUint = cast[uint64](i)
|
||||
@(asUint.toBytes)
|
||||
|
||||
proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 =
|
||||
let asUint = uint64.fromBytes(bytes)
|
||||
cast[int64](asUint)
|
||||
|
||||
proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 =
|
||||
bigint.truncate(int64)
|
||||
|
|
264
codex/codex.nim
264
codex/codex.nim
|
@ -8,10 +8,11 @@
|
|||
## those terms.
|
||||
|
||||
import std/sequtils
|
||||
import std/strutils
|
||||
import std/os
|
||||
import std/sugar
|
||||
import std/tables
|
||||
import std/cpuinfo
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
import pkg/presto
|
||||
import pkg/libp2p
|
||||
|
@ -20,38 +21,140 @@ import pkg/confutils/defs
|
|||
import pkg/nitro
|
||||
import pkg/stew/io2
|
||||
import pkg/stew/shims/net as stewnet
|
||||
import pkg/datastore
|
||||
import pkg/ethers except Rng
|
||||
import pkg/stew/io2
|
||||
import pkg/taskpools
|
||||
|
||||
import ./node
|
||||
import ./conf
|
||||
import ./rng
|
||||
import ./rest/api
|
||||
import ./stores
|
||||
import ./slots
|
||||
import ./blockexchange
|
||||
import ./utils/fileutils
|
||||
import ./erasure
|
||||
import ./discovery
|
||||
import ./contracts
|
||||
import ./utils/keyutils
|
||||
import ./systemclock
|
||||
import ./contracts/clock
|
||||
import ./contracts/deployment
|
||||
import ./utils/addrutils
|
||||
import ./namespaces
|
||||
import ./codextypes
|
||||
import ./logutils
|
||||
|
||||
logScope:
|
||||
topics = "codex node"
|
||||
|
||||
type
|
||||
CodexServer* = ref object
|
||||
runHandle: Future[void]
|
||||
config: CodexConf
|
||||
restServer: RestServerRef
|
||||
codexNode: CodexNodeRef
|
||||
repoStore: RepoStore
|
||||
maintenance: BlockMaintainer
|
||||
taskpool: Taskpool
|
||||
|
||||
CodexPrivateKey* = libp2p.PrivateKey # alias
|
||||
EthWallet = ethers.Wallet
|
||||
|
||||
proc waitForSync(provider: Provider): Future[void] {.async.} =
|
||||
var sleepTime = 1
|
||||
trace "Checking sync state of Ethereum provider..."
|
||||
while await provider.isSyncing:
|
||||
notice "Waiting for Ethereum provider to sync..."
|
||||
await sleepAsync(sleepTime.seconds)
|
||||
if sleepTime < 10:
|
||||
inc sleepTime
|
||||
trace "Ethereum provider is synced."
|
||||
|
||||
proc bootstrapInteractions(
|
||||
s: CodexServer): Future[void] {.async.} =
|
||||
## bootstrap interactions and return contracts
|
||||
## using clients, hosts, validators pairings
|
||||
##
|
||||
let
|
||||
config = s.config
|
||||
repo = s.repoStore
|
||||
|
||||
if config.persistence:
|
||||
if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
|
||||
error "Persistence enabled, but no Ethereum account was set"
|
||||
quit QuitFailure
|
||||
|
||||
let provider = JsonRpcProvider.new(config.ethProvider)
|
||||
await waitForSync(provider)
|
||||
var signer: Signer
|
||||
if account =? config.ethAccount:
|
||||
signer = provider.getSigner(account)
|
||||
elif keyFile =? config.ethPrivateKey:
|
||||
without isSecure =? checkSecureFile(keyFile):
|
||||
error "Could not check file permissions: does Ethereum private key file exist?"
|
||||
quit QuitFailure
|
||||
if not isSecure:
|
||||
error "Ethereum private key file does not have safe file permissions"
|
||||
quit QuitFailure
|
||||
without key =? keyFile.readAllChars():
|
||||
error "Unable to read Ethereum private key file"
|
||||
quit QuitFailure
|
||||
without wallet =? EthWallet.new(key.strip(), provider):
|
||||
error "Invalid Ethereum private key in file"
|
||||
quit QuitFailure
|
||||
signer = wallet
|
||||
|
||||
let deploy = Deployment.new(provider, config)
|
||||
without marketplaceAddress =? await deploy.address(Marketplace):
|
||||
error "No Marketplace address was specified or there is no known address for the current network"
|
||||
quit QuitFailure
|
||||
|
||||
let marketplace = Marketplace.new(marketplaceAddress, signer)
|
||||
let market = OnChainMarket.new(marketplace)
|
||||
let clock = OnChainClock.new(provider)
|
||||
|
||||
var client: ?ClientInteractions
|
||||
var host: ?HostInteractions
|
||||
var validator: ?ValidatorInteractions
|
||||
|
||||
if config.validator or config.persistence:
|
||||
s.codexNode.clock = clock
|
||||
else:
|
||||
s.codexNode.clock = SystemClock()
|
||||
|
||||
if config.persistence:
|
||||
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
|
||||
# and hence the proof failure will always be 0.
|
||||
when codex_enable_proof_failures:
|
||||
let proofFailures = config.simulateProofFailures
|
||||
if proofFailures > 0:
|
||||
warn "Enabling proof failure simulation!"
|
||||
else:
|
||||
let proofFailures = 0
|
||||
if config.simulateProofFailures > 0:
|
||||
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
|
||||
|
||||
let purchasing = Purchasing.new(market, clock)
|
||||
let sales = Sales.new(market, clock, repo, proofFailures)
|
||||
client = some ClientInteractions.new(clock, purchasing)
|
||||
host = some HostInteractions.new(clock, sales)
|
||||
|
||||
if config.validator:
|
||||
let validation = Validation.new(clock, market, config.validatorMaxSlots)
|
||||
validator = some ValidatorInteractions.new(clock, validation)
|
||||
|
||||
s.codexNode.contracts = (client, host, validator)
|
||||
|
||||
proc start*(s: CodexServer) {.async.} =
|
||||
s.restServer.start()
|
||||
await s.codexNode.start()
|
||||
trace "Starting codex node", config = $s.config
|
||||
|
||||
await s.repoStore.start()
|
||||
s.maintenance.start()
|
||||
|
||||
await s.codexNode.switch.start()
|
||||
|
||||
let
|
||||
# TODO: Can't define this as constants, pity
|
||||
# TODO: Can't define these as constants, pity
|
||||
natIpPart = MultiAddress.init("/ip4/" & $s.config.nat & "/")
|
||||
.expect("Should create multiaddress")
|
||||
anyAddrIp = MultiAddress.init("/ip4/0.0.0.0/")
|
||||
|
@ -75,32 +178,29 @@ proc start*(s: CodexServer) {.async.} =
|
|||
s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
|
||||
s.codexNode.discovery.updateDhtRecord(s.config.nat, s.config.discoveryPort)
|
||||
|
||||
s.runHandle = newFuture[void]("codex.runHandle")
|
||||
await s.runHandle
|
||||
await s.bootstrapInteractions()
|
||||
await s.codexNode.start()
|
||||
s.restServer.start()
|
||||
|
||||
proc stop*(s: CodexServer) {.async.} =
|
||||
notice "Stopping codex node"
|
||||
|
||||
|
||||
s.taskpool.syncAll()
|
||||
s.taskpool.shutdown()
|
||||
|
||||
await allFuturesThrowing(
|
||||
s.restServer.stop(), s.codexNode.stop())
|
||||
|
||||
s.runHandle.complete()
|
||||
|
||||
proc new(_: type ContractInteractions, config: CodexConf): ?ContractInteractions =
|
||||
if not config.persistence:
|
||||
if config.ethAccount.isSome:
|
||||
warn "Ethereum account was set, but persistence is not enabled"
|
||||
return
|
||||
|
||||
without account =? config.ethAccount:
|
||||
error "Persistence enabled, but no Ethereum account was set"
|
||||
quit QuitFailure
|
||||
|
||||
if deployment =? config.ethDeployment:
|
||||
ContractInteractions.new(config.ethProvider, account, deployment)
|
||||
else:
|
||||
ContractInteractions.new(config.ethProvider, account)
|
||||
|
||||
proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey): T =
|
||||
s.restServer.stop(),
|
||||
s.codexNode.switch.stop(),
|
||||
s.codexNode.stop(),
|
||||
s.repoStore.stop(),
|
||||
s.maintenance.stop())
|
||||
|
||||
proc new*(
|
||||
T: type CodexServer,
|
||||
config: CodexConf,
|
||||
privateKey: CodexPrivateKey): CodexServer =
|
||||
## create CodexServer including setting up datastore, repostore, etc
|
||||
let
|
||||
switch = SwitchBuilder
|
||||
.new()
|
||||
|
@ -118,13 +218,22 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):
|
|||
var
|
||||
cache: CacheStore = nil
|
||||
|
||||
if config.cacheSize > 0:
|
||||
cache = CacheStore.new(cacheSize = config.cacheSize * MiB)
|
||||
if config.cacheSize > 0'nb:
|
||||
cache = CacheStore.new(cacheSize = config.cacheSize)
|
||||
## Is unused?
|
||||
|
||||
let
|
||||
discoveryStore = Datastore(SQLiteDatastore.new(
|
||||
config.dataDir / "dht")
|
||||
.expect("Should not fail!"))
|
||||
discoveryDir = config.dataDir / CodexDhtNamespace
|
||||
|
||||
if io2.createPath(discoveryDir).isErr:
|
||||
trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
|
||||
raise (ref Defect)(
|
||||
msg: "Unable to create discovery directory for block store: " & discoveryDir)
|
||||
|
||||
let
|
||||
discoveryStore = Datastore(
|
||||
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
|
||||
.expect("Should create discovery datastore!"))
|
||||
|
||||
discovery = Discovery.new(
|
||||
switch.peerInfo.privateKey,
|
||||
|
@ -136,32 +245,85 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):
|
|||
|
||||
wallet = WalletRef.new(EthPrivateKey.random())
|
||||
network = BlockExcNetwork.new(switch)
|
||||
repoDir = config.dataDir / "repo"
|
||||
|
||||
if io2.createPath(repoDir).isErr:
|
||||
trace "Unable to create data directory for block store", dataDir = repoDir
|
||||
raise (ref Defect)(
|
||||
msg: "Unable to create data directory for block store: " & repoDir)
|
||||
repoData = case config.repoKind
|
||||
of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
|
||||
.expect("Should create repo file data store!"))
|
||||
of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir)
|
||||
.expect("Should create repo SQLite data store!"))
|
||||
of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir)
|
||||
.expect("Should create repo LevelDB data store!"))
|
||||
|
||||
repoStore = RepoStore.new(
|
||||
repoDs = repoData,
|
||||
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace)
|
||||
.expect("Should create metadata store!"),
|
||||
quotaMaxBytes = config.storageQuota,
|
||||
blockTtl = config.blockTtl)
|
||||
|
||||
maintenance = BlockMaintainer.new(
|
||||
repoStore,
|
||||
interval = config.blockMaintenanceInterval,
|
||||
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks)
|
||||
|
||||
let
|
||||
localStore = FSStore.new(repoDir, cache = cache)
|
||||
peerStore = PeerCtxStore.new()
|
||||
pendingBlocks = PendingBlocksManager.new()
|
||||
blockDiscovery = DiscoveryEngine.new(localStore, peerStore, network, discovery, pendingBlocks)
|
||||
engine = BlockExcEngine.new(localStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
|
||||
store = NetworkStore.new(engine, localStore)
|
||||
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
|
||||
contracts = ContractInteractions.new(config)
|
||||
codexNode = CodexNodeRef.new(switch, store, engine, erasure, discovery, contracts)
|
||||
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
|
||||
engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
|
||||
store = NetworkStore.new(engine, repoStore)
|
||||
prover = if config.prover:
|
||||
if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and
|
||||
endsWith($config.circomR1cs, ".r1cs"):
|
||||
error "Circom R1CS file not accessible"
|
||||
raise (ref Defect)(
|
||||
msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
|
||||
|
||||
if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and
|
||||
endsWith($config.circomWasm, ".wasm"):
|
||||
error "Circom wasm file not accessible"
|
||||
raise (ref Defect)(
|
||||
msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)")
|
||||
|
||||
let zkey = if not config.circomNoZkey:
|
||||
if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and
|
||||
endsWith($config.circomZkey, ".zkey"):
|
||||
error "Circom zkey file not accessible"
|
||||
raise (ref Defect)(
|
||||
msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)")
|
||||
|
||||
$config.circomZkey
|
||||
else: ""
|
||||
|
||||
some Prover.new(
|
||||
store,
|
||||
CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey),
|
||||
config.numProofSamples)
|
||||
else:
|
||||
none Prover
|
||||
|
||||
taskpool = Taskpool.new(num_threads = countProcessors())
|
||||
|
||||
codexNode = CodexNodeRef.new(
|
||||
switch = switch,
|
||||
networkStore = store,
|
||||
engine = engine,
|
||||
prover = prover,
|
||||
discovery = discovery,
|
||||
taskpool = taskpool)
|
||||
|
||||
restServer = RestServerRef.new(
|
||||
codexNode.initRestApi(config),
|
||||
initTAddress("127.0.0.1" , config.apiPort),
|
||||
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
|
||||
initTAddress(config.apiBindAddress , config.apiPort),
|
||||
bufferSize = (1024 * 64),
|
||||
maxRequestBodySize = int.high)
|
||||
.expect("Should start rest server!")
|
||||
|
||||
switch.mount(network)
|
||||
T(
|
||||
|
||||
CodexServer(
|
||||
config: config,
|
||||
codexNode: codexNode,
|
||||
restServer: restServer)
|
||||
restServer: restServer,
|
||||
repoStore: repoStore,
|
||||
maintenance: maintenance,
|
||||
taskpool: taskpool)
|
||||
|
|
|
@ -0,0 +1,113 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2023 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/tables
|
||||
import std/sugar
|
||||
|
||||
import pkg/libp2p/multicodec
|
||||
import pkg/libp2p/multihash
|
||||
import pkg/libp2p/cid
|
||||
import pkg/results
|
||||
import pkg/questionable/results
|
||||
|
||||
import ./units
|
||||
import ./errors
|
||||
|
||||
export tables
|
||||
|
||||
const
|
||||
# Size of blocks for storage / network exchange,
|
||||
DefaultBlockSize* = NBytes 1024*64
|
||||
DefaultCellSize* = NBytes 2048
|
||||
|
||||
# Proving defaults
|
||||
DefaultMaxSlotDepth* = 32
|
||||
DefaultMaxDatasetDepth* = 8
|
||||
DefaultBlockDepth* = 5
|
||||
DefaultCellElms* = 67
|
||||
DefaultSamplesNum* = 5
|
||||
|
||||
# hashes
|
||||
Sha256HashCodec* = multiCodec("sha2-256")
|
||||
Sha512HashCodec* = multiCodec("sha2-512")
|
||||
Pos2Bn128SpngCodec* = multiCodec("poseidon2-alt_bn_128-sponge-r2")
|
||||
Pos2Bn128MrklCodec* = multiCodec("poseidon2-alt_bn_128-merkle-2kb")
|
||||
|
||||
ManifestCodec* = multiCodec("codex-manifest")
|
||||
DatasetRootCodec* = multiCodec("codex-root")
|
||||
BlockCodec* = multiCodec("codex-block")
|
||||
SlotRootCodec* = multiCodec("codex-slot-root")
|
||||
SlotProvingRootCodec* = multiCodec("codex-proving-root")
|
||||
CodexSlotCellCodec* = multiCodec("codex-slot-cell")
|
||||
|
||||
CodexHashesCodecs* = [
|
||||
Sha256HashCodec,
|
||||
Pos2Bn128SpngCodec,
|
||||
Pos2Bn128MrklCodec
|
||||
]
|
||||
|
||||
CodexPrimitivesCodecs* = [
|
||||
ManifestCodec,
|
||||
DatasetRootCodec,
|
||||
BlockCodec,
|
||||
SlotRootCodec,
|
||||
SlotProvingRootCodec,
|
||||
CodexSlotCellCodec,
|
||||
]
|
||||
|
||||
proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
|
||||
## Initialize padding blocks table
|
||||
##
|
||||
## TODO: Ideally this is done at compile time, but for now
|
||||
## we do it at runtime because of an `importc` error that is
|
||||
## coming from somewhere in MultiHash that I can't track down.
|
||||
##
|
||||
|
||||
let
|
||||
emptyData: seq[byte] = @[]
|
||||
PadHashes = {
|
||||
Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
|
||||
Sha512HashCodec: ? MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
|
||||
}.toTable
|
||||
|
||||
var
|
||||
table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
|
||||
|
||||
for hcodec, mhash in PadHashes.pairs:
|
||||
table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure
|
||||
|
||||
success table
|
||||
|
||||
proc emptyCid*(
|
||||
version: CidVersion,
|
||||
hcodec: MultiCodec,
|
||||
dcodec: MultiCodec): ?!Cid =
|
||||
## Returns cid representing empty content,
|
||||
## given cid version, hash codec and data codec
|
||||
##
|
||||
|
||||
var
|
||||
table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
|
||||
|
||||
once:
|
||||
table = ? initEmptyCidTable()
|
||||
|
||||
table[(version, hcodec, dcodec)].catch
|
||||
|
||||
proc emptyDigest*(
|
||||
version: CidVersion,
|
||||
hcodec: MultiCodec,
|
||||
dcodec: MultiCodec): ?!MultiHash =
|
||||
## Returns hash representing empty content,
|
||||
## given cid version, hash codec and data codec
|
||||
##
|
||||
emptyCid(version, hcodec, dcodec)
|
||||
.flatMap((cid: Cid) => cid.mhash.mapFailure)
|
543
codex/conf.nim
543
codex/conf.nim
|
@ -7,9 +7,7 @@
|
|||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push: {.upraises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/os
|
||||
import std/terminal
|
||||
|
@ -17,37 +15,84 @@ import std/options
|
|||
import std/strutils
|
||||
import std/typetraits
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
import pkg/chronicles/helpers
|
||||
import pkg/chronicles/topics_registry
|
||||
import pkg/confutils/defs
|
||||
import pkg/confutils/std/net
|
||||
import pkg/toml_serialization
|
||||
import pkg/metrics
|
||||
import pkg/metrics/chronos_httpserver
|
||||
import pkg/stew/shims/net as stewnet
|
||||
import pkg/stew/shims/parseutils
|
||||
import pkg/stew/byteutils
|
||||
import pkg/libp2p
|
||||
import pkg/ethers
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
|
||||
import ./codextypes
|
||||
import ./discovery
|
||||
import ./stores/cachestore
|
||||
import ./logutils
|
||||
import ./stores
|
||||
import ./units
|
||||
import ./utils
|
||||
|
||||
export DefaultCacheSizeMiB, net
|
||||
export units, net, codextypes, logutils
|
||||
|
||||
export
|
||||
DefaultQuotaBytes,
|
||||
DefaultBlockTtl,
|
||||
DefaultBlockMaintenanceInterval,
|
||||
DefaultNumberOfBlocksToMaintainPerInterval
|
||||
|
||||
proc defaultDataDir*(): string =
|
||||
let dataDir = when defined(windows):
|
||||
"AppData" / "Roaming" / "Codex"
|
||||
elif defined(macosx):
|
||||
"Library" / "Application Support" / "Codex"
|
||||
else:
|
||||
".cache" / "codex"
|
||||
|
||||
getHomeDir() / dataDir
|
||||
|
||||
const
|
||||
codex_enable_api_debug_peers* {.booldefine.} = false
|
||||
codex_enable_proof_failures* {.booldefine.} = false
|
||||
codex_enable_log_counter* {.booldefine.} = false
|
||||
|
||||
DefaultDataDir* = defaultDataDir()
|
||||
|
||||
type
|
||||
StartUpCommand* {.pure.} = enum
|
||||
noCommand,
|
||||
initNode
|
||||
StartUpCmd* {.pure.} = enum
|
||||
noCmd
|
||||
persistence
|
||||
|
||||
LogKind* = enum
|
||||
PersistenceCmd* {.pure.} = enum
|
||||
noCmd
|
||||
prover
|
||||
|
||||
LogKind* {.pure.} = enum
|
||||
Auto = "auto"
|
||||
Colors = "colors"
|
||||
NoColors = "nocolors"
|
||||
Json = "json"
|
||||
None = "none"
|
||||
|
||||
RepoKind* = enum
|
||||
repoFS = "fs"
|
||||
repoSQLite = "sqlite"
|
||||
repoLevelDb = "leveldb"
|
||||
|
||||
CodexConf* = object
|
||||
configFile* {.
|
||||
desc: "Loads the configuration from a TOML file"
|
||||
defaultValueDesc: "none"
|
||||
defaultValue: InputFile.none
|
||||
name: "config-file" }: Option[InputFile]
|
||||
|
||||
logLevel* {.
|
||||
defaultValue: "INFO"
|
||||
defaultValue: "info"
|
||||
desc: "Sets the log level",
|
||||
name: "log-level" }: string
|
||||
|
||||
|
@ -75,86 +120,132 @@ type
|
|||
name: "metrics-port" }: Port
|
||||
|
||||
dataDir* {.
|
||||
desc: "The directory where codex will store configuration and data."
|
||||
defaultValue: defaultDataDir()
|
||||
defaultValueDesc: ""
|
||||
desc: "The directory where codex will store configuration and data"
|
||||
defaultValue: DefaultDataDir
|
||||
defaultValueDesc: $DefaultDataDir
|
||||
abbr: "d"
|
||||
name: "data-dir" }: OutDir
|
||||
|
||||
listenAddrs* {.
|
||||
desc: "Multi Addresses to listen on"
|
||||
defaultValue: @[
|
||||
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
|
||||
.expect("Should init multiaddress")]
|
||||
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
|
||||
abbr: "i"
|
||||
name: "listen-addrs" }: seq[MultiAddress]
|
||||
|
||||
# TODO: change this once we integrate nat support
|
||||
nat* {.
|
||||
desc: "IP Addresses to announce behind a NAT"
|
||||
defaultValue: ValidIpAddress.init("127.0.0.1")
|
||||
defaultValueDesc: "127.0.0.1"
|
||||
abbr: "a"
|
||||
name: "nat" }: ValidIpAddress
|
||||
|
||||
discoveryIp* {.
|
||||
desc: "Discovery listen address"
|
||||
defaultValue: ValidIpAddress.init(IPv4_any())
|
||||
defaultValueDesc: "0.0.0.0"
|
||||
abbr: "e"
|
||||
name: "disc-ip" }: ValidIpAddress
|
||||
|
||||
discoveryPort* {.
|
||||
desc: "Discovery (UDP) port"
|
||||
defaultValue: 8090.Port
|
||||
defaultValueDesc: "8090"
|
||||
abbr: "u"
|
||||
name: "disc-port" }: Port
|
||||
|
||||
netPrivKeyFile* {.
|
||||
desc: "Source of network (secp256k1) private key file path or name"
|
||||
defaultValue: "key"
|
||||
name: "net-privkey" }: string
|
||||
|
||||
bootstrapNodes* {.
|
||||
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
|
||||
abbr: "b"
|
||||
name: "bootstrap-node" }: seq[SignedPeerRecord]
|
||||
|
||||
maxPeers* {.
|
||||
desc: "The maximum number of peers to connect to"
|
||||
defaultValue: 160
|
||||
name: "max-peers" }: int
|
||||
|
||||
agentString* {.
|
||||
defaultValue: "Codex"
|
||||
desc: "Node agent string which is used as identifier in network"
|
||||
name: "agent-string" }: string
|
||||
|
||||
apiBindAddress* {.
|
||||
desc: "The REST API bind address"
|
||||
defaultValue: "127.0.0.1"
|
||||
name: "api-bindaddr"
|
||||
}: string
|
||||
|
||||
apiPort* {.
|
||||
desc: "The REST Api port",
|
||||
defaultValue: 8080.Port
|
||||
defaultValueDesc: "8080"
|
||||
name: "api-port"
|
||||
abbr: "p" }: Port
|
||||
|
||||
apiCorsAllowedOrigin* {.
|
||||
desc: "The REST Api CORS allowed origin for downloading data. '*' will allow all origins, '' will allow none.",
|
||||
defaultValue: string.none
|
||||
defaultValueDesc: "Disallow all cross origin requests to download data"
|
||||
name: "api-cors-origin" }: Option[string]
|
||||
|
||||
repoKind* {.
|
||||
desc: "Backend for main repo store (fs, sqlite, leveldb)"
|
||||
defaultValueDesc: "fs"
|
||||
defaultValue: repoFS
|
||||
name: "repo-kind" }: RepoKind
|
||||
|
||||
storageQuota* {.
|
||||
desc: "The size of the total storage quota dedicated to the node"
|
||||
defaultValue: DefaultQuotaBytes
|
||||
defaultValueDesc: $DefaultQuotaBytes
|
||||
name: "storage-quota"
|
||||
abbr: "q" }: NBytes
|
||||
|
||||
blockTtl* {.
|
||||
desc: "Default block timeout in seconds - 0 disables the ttl"
|
||||
defaultValue: DefaultBlockTtl
|
||||
defaultValueDesc: $DefaultBlockTtl
|
||||
name: "block-ttl"
|
||||
abbr: "t" }: Duration
|
||||
|
||||
blockMaintenanceInterval* {.
|
||||
desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
|
||||
defaultValue: DefaultBlockMaintenanceInterval
|
||||
defaultValueDesc: $DefaultBlockMaintenanceInterval
|
||||
name: "block-mi" }: Duration
|
||||
|
||||
blockMaintenanceNumberOfBlocks* {.
|
||||
desc: "Number of blocks to check every maintenance cycle"
|
||||
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
|
||||
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
|
||||
name: "block-mn" }: int
|
||||
|
||||
cacheSize* {.
|
||||
desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
|
||||
defaultValue: 0
|
||||
defaultValueDesc: "0"
|
||||
name: "cache-size"
|
||||
abbr: "c" }: NBytes
|
||||
|
||||
logFile* {.
|
||||
desc: "Logs to file"
|
||||
defaultValue: string.none
|
||||
name: "log-file"
|
||||
hidden
|
||||
.}: Option[string]
|
||||
|
||||
case cmd* {.
|
||||
command
|
||||
defaultValue: noCommand }: StartUpCommand
|
||||
|
||||
of noCommand:
|
||||
listenAddrs* {.
|
||||
desc: "Multi Addresses to listen on"
|
||||
defaultValue: @[
|
||||
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
|
||||
.expect("Should init multiaddress")]
|
||||
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
|
||||
abbr: "i"
|
||||
name: "listen-addrs" }: seq[MultiAddress]
|
||||
|
||||
nat* {.
|
||||
# TODO: change this once we integrate nat support
|
||||
desc: "IP Addresses to announce behind a NAT"
|
||||
defaultValue: ValidIpAddress.init("127.0.0.1")
|
||||
defaultValueDesc: "127.0.0.1"
|
||||
abbr: "a"
|
||||
name: "nat" }: ValidIpAddress
|
||||
|
||||
discoveryIp* {.
|
||||
desc: "Discovery listen address"
|
||||
defaultValue: ValidIpAddress.init(IPv4_any())
|
||||
defaultValueDesc: "0.0.0.0"
|
||||
name: "disc-ip" }: ValidIpAddress
|
||||
|
||||
discoveryPort* {.
|
||||
desc: "Discovery (UDP) port"
|
||||
defaultValue: Port(8090)
|
||||
defaultValueDesc: "8090"
|
||||
name: "disc-port" }: Port
|
||||
|
||||
netPrivKeyFile* {.
|
||||
desc: "Source of network (secp256k1) private key file path or name"
|
||||
defaultValue: "key"
|
||||
name: "net-privkey" }: string
|
||||
|
||||
bootstrapNodes* {.
|
||||
desc: "Specifies one or more bootstrap nodes to use when connecting to the network."
|
||||
abbr: "b"
|
||||
name: "bootstrap-node" }: seq[SignedPeerRecord]
|
||||
|
||||
maxPeers* {.
|
||||
desc: "The maximum number of peers to connect to"
|
||||
defaultValue: 160
|
||||
name: "max-peers" }: int
|
||||
|
||||
agentString* {.
|
||||
defaultValue: "Codex"
|
||||
desc: "Node agent string which is used as identifier in network"
|
||||
name: "agent-string" }: string
|
||||
|
||||
apiPort* {.
|
||||
desc: "The REST Api port",
|
||||
defaultValue: 8080
|
||||
defaultValueDesc: "8080"
|
||||
name: "api-port"
|
||||
abbr: "p" }: int
|
||||
|
||||
cacheSize* {.
|
||||
desc: "The size in MiB of the block cache, 0 disables the cache - might help on slow hardrives"
|
||||
defaultValue: 0
|
||||
defaultValueDesc: "0"
|
||||
name: "cache-size"
|
||||
abbr: "c" }: Natural
|
||||
|
||||
persistence* {.
|
||||
desc: "Enables persistence mechanism, requires an Ethereum node"
|
||||
defaultValue: false
|
||||
name: "persistence"
|
||||
.}: bool
|
||||
|
||||
defaultValue: noCmd
|
||||
command }: StartUpCmd
|
||||
of persistence:
|
||||
ethProvider* {.
|
||||
desc: "The URL of the JSON-RPC API of the Ethereum node"
|
||||
defaultValue: "ws://localhost:8545"
|
||||
|
@ -164,66 +255,244 @@ type
|
|||
ethAccount* {.
|
||||
desc: "The Ethereum account that is used for storage contracts"
|
||||
defaultValue: EthAddress.none
|
||||
defaultValueDesc: ""
|
||||
name: "eth-account"
|
||||
.}: Option[EthAddress]
|
||||
|
||||
ethDeployment* {.
|
||||
desc: "The json file describing the contract deployment"
|
||||
ethPrivateKey* {.
|
||||
desc: "File containing Ethereum private key for storage contracts"
|
||||
defaultValue: string.none
|
||||
name: "eth-deployment"
|
||||
defaultValueDesc: ""
|
||||
name: "eth-private-key"
|
||||
.}: Option[string]
|
||||
|
||||
of initNode:
|
||||
discard
|
||||
marketplaceAddress* {.
|
||||
desc: "Address of deployed Marketplace contract"
|
||||
defaultValue: EthAddress.none
|
||||
defaultValueDesc: ""
|
||||
name: "marketplace-address"
|
||||
.}: Option[EthAddress]
|
||||
|
||||
# TODO: should go behind a feature flag
|
||||
simulateProofFailures* {.
|
||||
desc: "Simulates proof failures once every N proofs. 0 = disabled."
|
||||
defaultValue: 0
|
||||
name: "simulate-proof-failures"
|
||||
hidden
|
||||
.}: int
|
||||
|
||||
validator* {.
|
||||
desc: "Enables validator, requires an Ethereum node"
|
||||
defaultValue: false
|
||||
name: "validator"
|
||||
.}: bool
|
||||
|
||||
validatorMaxSlots* {.
|
||||
desc: "Maximum number of slots that the validator monitors"
|
||||
defaultValue: 1000
|
||||
name: "validator-max-slots"
|
||||
.}: int
|
||||
|
||||
case persistenceCmd* {.
|
||||
defaultValue: noCmd
|
||||
command }: PersistenceCmd
|
||||
|
||||
of PersistenceCmd.prover:
|
||||
circomR1cs* {.
|
||||
desc: "The r1cs file for the storage circuit"
|
||||
defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
|
||||
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
|
||||
name: "circom-r1cs"
|
||||
.}: InputFile
|
||||
|
||||
circomWasm* {.
|
||||
desc: "The wasm file for the storage circuit"
|
||||
defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
|
||||
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
|
||||
name: "circom-wasm"
|
||||
.}: InputFile
|
||||
|
||||
circomZkey* {.
|
||||
desc: "The zkey file for the storage circuit"
|
||||
defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
|
||||
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
|
||||
name: "circom-zkey"
|
||||
.}: InputFile
|
||||
|
||||
# TODO: should probably be hidden and behind a feature flag
|
||||
circomNoZkey* {.
|
||||
desc: "Ignore the zkey file - use only for testing!"
|
||||
defaultValue: false
|
||||
name: "circom-no-zkey"
|
||||
.}: bool
|
||||
|
||||
numProofSamples* {.
|
||||
desc: "Number of samples to prove"
|
||||
defaultValue: DefaultSamplesNum
|
||||
defaultValueDesc: $DefaultSamplesNum
|
||||
name: "proof-samples" }: int
|
||||
|
||||
maxSlotDepth* {.
|
||||
desc: "The maximum depth of the slot tree"
|
||||
defaultValue: DefaultMaxSlotDepth
|
||||
defaultValueDesc: $DefaultMaxSlotDepth
|
||||
name: "max-slot-depth" }: int
|
||||
|
||||
maxDatasetDepth* {.
|
||||
desc: "The maximum depth of the dataset tree"
|
||||
defaultValue: DefaultMaxDatasetDepth
|
||||
defaultValueDesc: $DefaultMaxDatasetDepth
|
||||
name: "max-dataset-depth" }: int
|
||||
|
||||
maxBlockDepth* {.
|
||||
desc: "The maximum depth of the network block merkle tree"
|
||||
defaultValue: DefaultBlockDepth
|
||||
defaultValueDesc: $DefaultBlockDepth
|
||||
name: "max-block-depth" }: int
|
||||
|
||||
maxCellElms* {.
|
||||
desc: "The maximum number of elements in a cell"
|
||||
defaultValue: DefaultCellElms
|
||||
defaultValueDesc: $DefaultCellElms
|
||||
name: "max-cell-elements" }: int
|
||||
of PersistenceCmd.noCmd:
|
||||
discard
|
||||
|
||||
of StartUpCmd.noCmd:
|
||||
discard # end of persistence
|
||||
|
||||
EthAddress* = ethers.Address
|
||||
|
||||
logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
|
||||
logutils.formatIt(LogFormat.json, EthAddress): %it
|
||||
|
||||
func persistence*(self: CodexConf): bool =
|
||||
self.cmd == StartUpCmd.persistence
|
||||
|
||||
func prover*(self: CodexConf): bool =
|
||||
self.persistence and self.persistenceCmd == PersistenceCmd.prover
|
||||
|
||||
proc getCodexVersion(): string =
|
||||
let tag = strip(staticExec("git tag"))
|
||||
if tag.isEmptyOrWhitespace:
|
||||
return "untagged build"
|
||||
return tag
|
||||
|
||||
proc getCodexRevision(): string =
|
||||
# using a slice in a static context breaks nimsuggest for some reason
|
||||
var res = strip(staticExec("git rev-parse --short HEAD"))
|
||||
return res
|
||||
|
||||
proc getNimBanner(): string =
|
||||
staticExec("nim --version | grep Version")
|
||||
|
||||
const
|
||||
gitRevision* = strip(staticExec("git rev-parse --short HEAD"))[0..5]
|
||||
|
||||
nimBanner* = staticExec("nim --version | grep Version")
|
||||
|
||||
#TODO add versionMajor, Minor & Fix when we switch to semver
|
||||
codexVersion* = gitRevision
|
||||
codexVersion* = getCodexVersion()
|
||||
codexRevision* = getCodexRevision()
|
||||
nimBanner* = getNimBanner()
|
||||
|
||||
codexFullVersion* =
|
||||
"Codex build " & codexVersion & "\p" &
|
||||
"Codex version: " & codexVersion & "\p" &
|
||||
"Codex revision: " & codexRevision & "\p" &
|
||||
nimBanner
|
||||
|
||||
proc defaultDataDir*(): string =
|
||||
let dataDir = when defined(windows):
|
||||
"AppData" / "Roaming" / "Codex"
|
||||
elif defined(macosx):
|
||||
"Library" / "Application Support" / "Codex"
|
||||
proc parseCmdArg*(T: typedesc[MultiAddress],
|
||||
input: string): MultiAddress
|
||||
{.upraises: [ValueError, LPError].} =
|
||||
var ma: MultiAddress
|
||||
let res = MultiAddress.init(input)
|
||||
if res.isOk:
|
||||
ma = res.get()
|
||||
else:
|
||||
".cache" / "codex"
|
||||
warn "Invalid MultiAddress", input=input, error = res.error()
|
||||
quit QuitFailure
|
||||
ma
|
||||
|
||||
getHomeDir() / dataDir
|
||||
|
||||
func parseCmdArg*(T: type MultiAddress, input: TaintedString): T
|
||||
{.raises: [ValueError, LPError, Defect].} =
|
||||
MultiAddress.init($input).tryGet()
|
||||
|
||||
proc parseCmdArg*(T: type SignedPeerRecord, uri: TaintedString): T =
|
||||
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
|
||||
var res: SignedPeerRecord
|
||||
try:
|
||||
if not res.fromURI(uri):
|
||||
warn "Invalid SignedPeerRecord uri", uri=uri
|
||||
warn "Invalid SignedPeerRecord uri", uri = uri
|
||||
quit QuitFailure
|
||||
except CatchableError as exc:
|
||||
warn "Invalid SignedPeerRecord uri", uri=uri, error=exc.msg
|
||||
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
|
||||
quit QuitFailure
|
||||
res
|
||||
|
||||
func parseCmdArg*(T: type EthAddress, address: TaintedString): T =
|
||||
proc parseCmdArg*(T: type EthAddress, address: string): T =
|
||||
EthAddress.init($address).get()
|
||||
|
||||
proc parseCmdArg*(T: type NBytes, val: string): T =
|
||||
var num = 0'i64
|
||||
let count = parseSize(val, num, alwaysBin = true)
|
||||
if count == 0:
|
||||
warn "Invalid number of bytes", nbytes = val
|
||||
quit QuitFailure
|
||||
NBytes(num)
|
||||
|
||||
proc parseCmdArg*(T: type Duration, val: string): T =
|
||||
var dur: Duration
|
||||
let count = parseDuration(val, dur)
|
||||
if count == 0:
|
||||
warn "Cannot parse duration", dur = dur
|
||||
quit QuitFailure
|
||||
dur
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var EthAddress)
|
||||
{.upraises: [SerializationError, IOError].} =
|
||||
val = EthAddress.init(r.readValue(string)).get()
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
|
||||
without uri =? r.readValue(string).catch, err:
|
||||
error "invalid SignedPeerRecord configuration value", error = err.msg
|
||||
quit QuitFailure
|
||||
|
||||
val = SignedPeerRecord.parseCmdArg(uri)
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var MultiAddress) =
|
||||
without input =? r.readValue(string).catch, err:
|
||||
error "invalid MultiAddress configuration value", error = err.msg
|
||||
quit QuitFailure
|
||||
|
||||
let res = MultiAddress.init(input)
|
||||
if res.isOk:
|
||||
val = res.get()
|
||||
else:
|
||||
warn "Invalid MultiAddress", input=input, error=res.error()
|
||||
quit QuitFailure
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var NBytes)
|
||||
{.upraises: [SerializationError, IOError].} =
|
||||
var value = 0'i64
|
||||
var str = r.readValue(string)
|
||||
let count = parseSize(str, value, alwaysBin = true)
|
||||
if count == 0:
|
||||
error "invalid number of bytes for configuration value", value = str
|
||||
quit QuitFailure
|
||||
val = NBytes(value)
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var Duration)
|
||||
{.upraises: [SerializationError, IOError].} =
|
||||
var str = r.readValue(string)
|
||||
var dur: Duration
|
||||
let count = parseDuration(str, dur)
|
||||
if count == 0:
|
||||
error "Invalid duration parse", value = str
|
||||
quit QuitFailure
|
||||
val = dur
|
||||
|
||||
# no idea why confutils needs this:
|
||||
proc completeCmdArg*(T: type EthAddress; val: TaintedString): seq[string] =
|
||||
proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
|
||||
discard
|
||||
|
||||
proc completeCmdArg*(T: type NBytes; val: string): seq[string] =
|
||||
discard
|
||||
|
||||
proc completeCmdArg*(T: type Duration; val: string): seq[string] =
|
||||
discard
|
||||
|
||||
# silly chronicles, colors is a compile-time property
|
||||
proc stripAnsi(v: string): string =
|
||||
proc stripAnsi*(v: string): string =
|
||||
var
|
||||
res = newStringOfCap(v.len)
|
||||
i: int
|
||||
|
@ -258,13 +527,13 @@ proc stripAnsi(v: string): string =
|
|||
|
||||
res
|
||||
|
||||
proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
|
||||
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
|
||||
# Updates log levels (without clearing old ones)
|
||||
let directives = logLevel.split(";")
|
||||
try:
|
||||
setLogLevel(parseEnum[LogLevel](directives[0]))
|
||||
setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii))
|
||||
except ValueError:
|
||||
raise (ref ValueError)(msg: "Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL")
|
||||
raise (ref ValueError)(msg: "Please specify one of: trace, debug, info, notice, warn, error or fatal")
|
||||
|
||||
if directives.len > 1:
|
||||
for topicName, settings in parseTopicDirectives(directives[1..^1]):
|
||||
|
@ -272,9 +541,10 @@ proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
|
|||
warn "Unrecognized logging topic", topic = topicName
|
||||
|
||||
proc setupLogging*(conf: CodexConf) =
|
||||
when defaultChroniclesStream.outputs.type.arity != 2:
|
||||
when defaultChroniclesStream.outputs.type.arity != 3:
|
||||
warn "Logging configuration options not enabled in the current build"
|
||||
else:
|
||||
var logFile: ?IoHandle
|
||||
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
|
||||
proc writeAndFlush(f: File, msg: LogOutputStr) =
|
||||
try:
|
||||
|
@ -289,9 +559,28 @@ proc setupLogging*(conf: CodexConf) =
|
|||
proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) =
|
||||
writeAndFlush(stdout, stripAnsi(msg))
|
||||
|
||||
proc fileFlush(logLevel: LogLevel, msg: LogOutputStr) =
|
||||
if file =? logFile:
|
||||
if error =? file.writeFile(stripAnsi(msg).toBytes).errorOption:
|
||||
error "failed to write to log file", errorCode = $error
|
||||
|
||||
defaultChroniclesStream.outputs[2].writer = noOutput
|
||||
if logFilePath =? conf.logFile and logFilePath.len > 0:
|
||||
let logFileHandle = openFile(
|
||||
logFilePath,
|
||||
{OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
|
||||
)
|
||||
if logFileHandle.isErr:
|
||||
error "failed to open log file",
|
||||
path = logFilePath,
|
||||
errorCode = $logFileHandle.error
|
||||
else:
|
||||
logFile = logFileHandle.option
|
||||
defaultChroniclesStream.outputs[2].writer = fileFlush
|
||||
|
||||
defaultChroniclesStream.outputs[1].writer = noOutput
|
||||
|
||||
defaultChroniclesStream.outputs[0].writer =
|
||||
let writer =
|
||||
case conf.logFormat:
|
||||
of LogKind.Auto:
|
||||
if isatty(stdout):
|
||||
|
@ -306,6 +595,16 @@ proc setupLogging*(conf: CodexConf) =
|
|||
of LogKind.None:
|
||||
noOutput
|
||||
|
||||
when codex_enable_log_counter:
|
||||
var counter = 0.uint64
|
||||
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
|
||||
inc(counter)
|
||||
let withoutNewLine = msg[0..^2]
|
||||
writer(logLevel, withoutNewLine & " count=" & $counter & "\n")
|
||||
defaultChroniclesStream.outputs[0].writer = numberedWriter
|
||||
else:
|
||||
defaultChroniclesStream.outputs[0].writer = writer
|
||||
|
||||
try:
|
||||
updateLogLevel(conf.logLevel)
|
||||
except ValueError as err:
|
||||
|
|
|
@ -1,13 +1,9 @@
|
|||
import contracts/requests
|
||||
import contracts/storage
|
||||
import contracts/deployment
|
||||
import contracts/marketplace
|
||||
import contracts/market
|
||||
import contracts/proofs
|
||||
import contracts/interactions
|
||||
|
||||
export requests
|
||||
export storage
|
||||
export deployment
|
||||
export marketplace
|
||||
export market
|
||||
export proofs
|
||||
export interactions
|
||||
|
|
|
@ -20,7 +20,7 @@ import ethers
|
|||
|
||||
let address = # fill in address where the contract was deployed
|
||||
let provider = JsonRpcProvider.new("ws://localhost:8545")
|
||||
let storage = Storage.new(address, provider)
|
||||
let marketplace = Marketplace.new(address, provider)
|
||||
```
|
||||
|
||||
Setup client and host so that they can sign transactions; here we use the first
|
||||
|
@ -32,36 +32,6 @@ let client = provider.getSigner(accounts[0])
|
|||
let host = provider.getSigner(accounts[1])
|
||||
```
|
||||
|
||||
Collateral
|
||||
----------
|
||||
|
||||
Hosts need to put up collateral before participating in storage contracts.
|
||||
|
||||
A host can learn about the amount of collateral that is required:
|
||||
```nim
|
||||
let collateralAmount = await storage.collateralAmount()
|
||||
```
|
||||
|
||||
The host then needs to prepare a payment to the smart contract by calling the
|
||||
`approve` method on the [ERC20 token][2]. Note that interaction with ERC20
|
||||
contracts is not part of this library.
|
||||
|
||||
After preparing the payment, the host can deposit collateral:
|
||||
```nim
|
||||
await storage
|
||||
.connect(host)
|
||||
.deposit(collateralAmount)
|
||||
```
|
||||
|
||||
When a host is not participating in storage offers or contracts, it can withdraw
|
||||
its collateral:
|
||||
|
||||
```
|
||||
await storage
|
||||
.connect(host)
|
||||
.withdraw()
|
||||
```
|
||||
|
||||
Storage requests
|
||||
----------------
|
||||
|
||||
|
@ -82,9 +52,7 @@ let request : StorageRequest = (
|
|||
|
||||
When a client wants to submit this request to the network, it needs to pay the
|
||||
maximum price to the smart contract in advance. The difference between the
|
||||
maximum price and the offered price will be reimbursed later. To prepare, the
|
||||
client needs to call the `approve` method on the [ERC20 token][2]. Note that
|
||||
interaction with ERC20 contracts is not part of this library.
|
||||
maximum price and the offered price will be reimbursed later.
|
||||
|
||||
Once the payment has been prepared, the client can submit the request to the
|
||||
network:
|
||||
|
@ -151,7 +119,7 @@ Storage proofs
|
|||
Time is divided into periods, and each period a storage proof may be required
|
||||
from the host. The odds of requiring a storage proof are negotiated through the
|
||||
storage request. For more details about the timing of storage proofs, please
|
||||
refer to the [design document][3].
|
||||
refer to the [design document][2].
|
||||
|
||||
At the start of each period of time, the host can check whether a storage proof
|
||||
is required:
|
||||
|
@ -176,6 +144,5 @@ await storage
|
|||
.markProofAsMissing(id, period)
|
||||
```
|
||||
|
||||
[1]: https://github.com/status-im/dagger-contracts/
|
||||
[2]: https://ethereum.org/en/developers/docs/standards/tokens/erc-20/
|
||||
[3]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
|
||||
[1]: https://github.com/status-im/codex-contracts-eth/
|
||||
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
|
||||
|
|
|
@ -3,41 +3,69 @@ import pkg/ethers
|
|||
import pkg/chronos
|
||||
import pkg/stint
|
||||
import ../clock
|
||||
import ../conf
|
||||
|
||||
export clock
|
||||
|
||||
logScope:
|
||||
topics = "contracts clock"
|
||||
|
||||
type
|
||||
OnChainClock* = ref object of Clock
|
||||
provider: Provider
|
||||
subscription: Subscription
|
||||
offset: times.Duration
|
||||
blockNumber: UInt256
|
||||
started: bool
|
||||
newBlock: AsyncEvent
|
||||
|
||||
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
|
||||
OnChainClock(provider: provider)
|
||||
OnChainClock(provider: provider, newBlock: newAsyncEvent())
|
||||
|
||||
proc start*(clock: OnChainClock) {.async.} =
|
||||
if clock.started:
|
||||
return
|
||||
clock.started = true
|
||||
|
||||
proc onBlock(blck: Block) {.async, upraises:[].} =
|
||||
proc update(clock: OnChainClock, blck: Block) =
|
||||
if number =? blck.number and number > clock.blockNumber:
|
||||
let blockTime = initTime(blck.timestamp.truncate(int64), 0)
|
||||
let computerTime = getTime()
|
||||
clock.offset = blockTime - computerTime
|
||||
clock.blockNumber = number
|
||||
trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
|
||||
clock.newBlock.fire()
|
||||
|
||||
if latestBlock =? (await clock.provider.getBlock(BlockTag.latest)):
|
||||
await onBlock(latestBlock)
|
||||
proc update(clock: OnChainClock) {.async.} =
|
||||
try:
|
||||
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
|
||||
clock.update(latest)
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as error:
|
||||
debug "error updating clock: ", error=error.msg
|
||||
discard
|
||||
|
||||
method start*(clock: OnChainClock) {.async.} =
|
||||
if clock.started:
|
||||
return
|
||||
|
||||
proc onBlock(_: Block) =
|
||||
# ignore block parameter; hardhat may call this with pending blocks
|
||||
asyncSpawn clock.update()
|
||||
|
||||
await clock.update()
|
||||
|
||||
clock.subscription = await clock.provider.subscribe(onBlock)
|
||||
clock.started = true
|
||||
|
||||
proc stop*(clock: OnChainClock) {.async.} =
|
||||
method stop*(clock: OnChainClock) {.async.} =
|
||||
if not clock.started:
|
||||
return
|
||||
clock.started = false
|
||||
|
||||
await clock.subscription.unsubscribe()
|
||||
clock.started = false
|
||||
|
||||
method now*(clock: OnChainClock): SecondsSince1970 =
|
||||
doAssert clock.started, "clock should be started before calling now()"
|
||||
toUnix(getTime() + clock.offset)
|
||||
return toUnix(getTime() + clock.offset)
|
||||
|
||||
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
|
||||
while (let difference = time - clock.now(); difference > 0):
|
||||
clock.newBlock.clear()
|
||||
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))
|
||||
|
|
|
@ -0,0 +1,78 @@
|
|||
import pkg/contractabi
|
||||
import pkg/ethers/fields
|
||||
import pkg/questionable/results
|
||||
|
||||
export contractabi
|
||||
|
||||
type
|
||||
MarketplaceConfig* = object
|
||||
collateral*: CollateralConfig
|
||||
proofs*: ProofConfig
|
||||
CollateralConfig* = object
|
||||
repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed
|
||||
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
|
||||
slashCriterion*: uint16 # amount of proofs missed that lead to slashing
|
||||
slashPercentage*: uint8 # percentage of the collateral that is slashed
|
||||
ProofConfig* = object
|
||||
period*: UInt256 # proofs requirements are calculated per period (in seconds)
|
||||
timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
|
||||
downtime*: uint8 # ignore this much recent blocks for proof requirements
|
||||
zkeyHash*: string # hash of the zkey file which is linked to the verifier
|
||||
# Ensures the pointer does not remain in downtime for many consecutive
|
||||
# periods. For each period increase, move the pointer `pointerProduct`
|
||||
# blocks. Should be a prime number to ensure there are no cycles.
|
||||
downtimeProduct*: uint8
|
||||
|
||||
|
||||
func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
|
||||
ProofConfig(
|
||||
period: tupl[0],
|
||||
timeout: tupl[1],
|
||||
downtime: tupl[2],
|
||||
zkeyHash: tupl[3],
|
||||
downtimeProduct: tupl[4]
|
||||
)
|
||||
|
||||
func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
|
||||
CollateralConfig(
|
||||
repairRewardPercentage: tupl[0],
|
||||
maxNumberOfSlashes: tupl[1],
|
||||
slashCriterion: tupl[2],
|
||||
slashPercentage: tupl[3]
|
||||
)
|
||||
|
||||
func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
|
||||
MarketplaceConfig(
|
||||
collateral: tupl[0],
|
||||
proofs: tupl[1]
|
||||
)
|
||||
|
||||
func solidityType*(_: type ProofConfig): string =
|
||||
solidityType(ProofConfig.fieldTypes)
|
||||
|
||||
func solidityType*(_: type CollateralConfig): string =
|
||||
solidityType(CollateralConfig.fieldTypes)
|
||||
|
||||
func solidityType*(_: type MarketplaceConfig): string =
|
||||
solidityType(CollateralConfig.fieldTypes)
|
||||
|
||||
func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
|
||||
encoder.write(slot.fieldValues)
|
||||
|
||||
func encode*(encoder: var AbiEncoder, slot: CollateralConfig) =
|
||||
encoder.write(slot.fieldValues)
|
||||
|
||||
func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) =
|
||||
encoder.write(slot.fieldValues)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
|
||||
let tupl = ?decoder.read(ProofConfig.fieldTypes)
|
||||
success ProofConfig.fromTuple(tupl)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
|
||||
let tupl = ?decoder.read(CollateralConfig.fieldTypes)
|
||||
success CollateralConfig.fromTuple(tupl)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T =
|
||||
let tupl = ?decoder.read(MarketplaceConfig.fieldTypes)
|
||||
success MarketplaceConfig.fromTuple(tupl)
|
|
@ -1,26 +1,43 @@
|
|||
import std/json
|
||||
import std/os
|
||||
import std/tables
|
||||
import pkg/ethers
|
||||
import pkg/questionable
|
||||
|
||||
type Deployment* = object
|
||||
json: JsonNode
|
||||
import ../conf
|
||||
import ../logutils
|
||||
import ./marketplace
|
||||
|
||||
const defaultFile = "vendor" / "dagger-contracts" / "deployment-localhost.json"
|
||||
type Deployment* = ref object
|
||||
provider: Provider
|
||||
config: CodexConf
|
||||
|
||||
## Reads deployment information from a json file. It expects a file that has
|
||||
## been exported with Hardhat deploy.
|
||||
## See also:
|
||||
## https://github.com/wighawag/hardhat-deploy/tree/master#6-hardhat-export
|
||||
proc deployment*(file = defaultFile): Deployment =
|
||||
Deployment(json: parseFile(file))
|
||||
const knownAddresses = {
|
||||
# Hardhat localhost network
|
||||
"31337": {
|
||||
"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"),
|
||||
}.toTable,
|
||||
# Taiko Alpha-3 Testnet
|
||||
"167005": {
|
||||
"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")
|
||||
}.toTable
|
||||
}.toTable
|
||||
|
||||
proc address*(deployment: Deployment, Contract: typedesc): ?Address =
|
||||
if deployment.json == nil:
|
||||
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
|
||||
let id = chainId.toString(10)
|
||||
notice "Looking for well-known contract address with ChainID ", chainId=id
|
||||
|
||||
if not (id in knownAddresses):
|
||||
return none Address
|
||||
|
||||
try:
|
||||
let address = deployment.json["contracts"][$Contract]["address"].getStr()
|
||||
Address.init(address)
|
||||
except KeyError:
|
||||
none Address
|
||||
return knownAddresses[id].getOrDefault($T, Address.none)
|
||||
|
||||
proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
|
||||
Deployment(provider: provider, config: config)
|
||||
|
||||
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
|
||||
when contract is Marketplace:
|
||||
if address =? deployment.config.marketplaceAddress:
|
||||
return some address
|
||||
|
||||
let chainId = await deployment.provider.getChainId()
|
||||
return contract.getKnownAddress(chainId)
|
||||
|
|
|
@ -1,78 +1,9 @@
|
|||
import pkg/ethers
|
||||
import pkg/chronicles
|
||||
import ../purchasing
|
||||
import ../sales
|
||||
import ../proving
|
||||
import ./deployment
|
||||
import ./storage
|
||||
import ./market
|
||||
import ./proofs
|
||||
import ./clock
|
||||
import ./interactions/interactions
|
||||
import ./interactions/hostinteractions
|
||||
import ./interactions/clientinteractions
|
||||
import ./interactions/validatorinteractions
|
||||
|
||||
export purchasing
|
||||
export sales
|
||||
export proving
|
||||
export chronicles
|
||||
|
||||
type
|
||||
ContractInteractions* = ref object
|
||||
purchasing*: Purchasing
|
||||
sales*: Sales
|
||||
proving*: Proving
|
||||
clock: OnChainClock
|
||||
|
||||
proc new*(_: type ContractInteractions,
|
||||
signer: Signer,
|
||||
deployment: Deployment): ?ContractInteractions =
|
||||
|
||||
without address =? deployment.address(Storage):
|
||||
error "Unable to determine address of the Storage smart contract"
|
||||
return none ContractInteractions
|
||||
|
||||
let contract = Storage.new(address, signer)
|
||||
let market = OnChainMarket.new(contract)
|
||||
let proofs = OnChainProofs.new(contract)
|
||||
let clock = OnChainClock.new(signer.provider)
|
||||
let proving = Proving.new(proofs, clock)
|
||||
some ContractInteractions(
|
||||
purchasing: Purchasing.new(market, clock),
|
||||
sales: Sales.new(market, clock, proving),
|
||||
proving: proving,
|
||||
clock: clock
|
||||
)
|
||||
|
||||
proc new*(_: type ContractInteractions,
|
||||
providerUrl: string,
|
||||
account: Address,
|
||||
deploymentFile: string = string.default): ?ContractInteractions =
|
||||
|
||||
let provider = JsonRpcProvider.new(providerUrl)
|
||||
let signer = provider.getSigner(account)
|
||||
|
||||
var deploy: Deployment
|
||||
try:
|
||||
if deploymentFile == string.default:
|
||||
deploy = deployment()
|
||||
else:
|
||||
deploy = deployment(deploymentFile)
|
||||
except IOError as e:
|
||||
error "Unable to read deployment json", msg = e.msg
|
||||
return none ContractInteractions
|
||||
|
||||
ContractInteractions.new(signer, deploy)
|
||||
|
||||
proc new*(_: type ContractInteractions,
|
||||
account: Address): ?ContractInteractions =
|
||||
ContractInteractions.new("ws://localhost:8545", account)
|
||||
|
||||
proc start*(interactions: ContractInteractions) {.async.} =
|
||||
await interactions.clock.start()
|
||||
await interactions.sales.start()
|
||||
await interactions.proving.start()
|
||||
await interactions.purchasing.start()
|
||||
|
||||
proc stop*(interactions: ContractInteractions) {.async.} =
|
||||
await interactions.purchasing.stop()
|
||||
await interactions.sales.stop()
|
||||
await interactions.proving.stop()
|
||||
await interactions.clock.stop()
|
||||
export interactions
|
||||
export hostinteractions
|
||||
export clientinteractions
|
||||
export validatorinteractions
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
import pkg/ethers
|
||||
|
||||
import ../../purchasing
|
||||
import ../../logutils
|
||||
import ../market
|
||||
import ../clock
|
||||
import ./interactions
|
||||
|
||||
export purchasing
|
||||
export logutils
|
||||
|
||||
type
|
||||
ClientInteractions* = ref object of ContractInteractions
|
||||
purchasing*: Purchasing
|
||||
|
||||
proc new*(_: type ClientInteractions,
|
||||
clock: OnChainClock,
|
||||
purchasing: Purchasing): ClientInteractions =
|
||||
ClientInteractions(clock: clock, purchasing: purchasing)
|
||||
|
||||
proc start*(self: ClientInteractions) {.async.} =
|
||||
await procCall ContractInteractions(self).start()
|
||||
await self.purchasing.start()
|
||||
|
||||
proc stop*(self: ClientInteractions) {.async.} =
|
||||
await self.purchasing.stop()
|
||||
await procCall ContractInteractions(self).stop()
|
|
@ -0,0 +1,29 @@
|
|||
import pkg/chronos
|
||||
|
||||
import ../../logutils
|
||||
import ../../sales
|
||||
import ./interactions
|
||||
|
||||
export sales
|
||||
export logutils
|
||||
|
||||
type
|
||||
HostInteractions* = ref object of ContractInteractions
|
||||
sales*: Sales
|
||||
|
||||
proc new*(
|
||||
_: type HostInteractions,
|
||||
clock: Clock,
|
||||
sales: Sales
|
||||
): HostInteractions =
|
||||
## Create a new HostInteractions instance
|
||||
##
|
||||
HostInteractions(clock: clock, sales: sales)
|
||||
|
||||
method start*(self: HostInteractions) {.async.} =
|
||||
await procCall ContractInteractions(self).start()
|
||||
await self.sales.start()
|
||||
|
||||
method stop*(self: HostInteractions) {.async.} =
|
||||
await self.sales.stop()
|
||||
await procCall ContractInteractions(self).start()
|
|
@ -0,0 +1,16 @@
|
|||
import pkg/ethers
|
||||
import ../clock
|
||||
import ../marketplace
|
||||
import ../market
|
||||
|
||||
export clock
|
||||
|
||||
type
|
||||
ContractInteractions* = ref object of RootObj
|
||||
clock*: Clock
|
||||
|
||||
method start*(self: ContractInteractions) {.async, base.} =
|
||||
discard
|
||||
|
||||
method stop*(self: ContractInteractions) {.async, base.} =
|
||||
discard
|
|
@ -0,0 +1,21 @@
|
|||
import ./interactions
|
||||
import ../../validation
|
||||
|
||||
export validation
|
||||
|
||||
type
|
||||
ValidatorInteractions* = ref object of ContractInteractions
|
||||
validation: Validation
|
||||
|
||||
proc new*(_: type ValidatorInteractions,
|
||||
clock: OnChainClock,
|
||||
validation: Validation): ValidatorInteractions =
|
||||
ValidatorInteractions(clock: clock, validation: validation)
|
||||
|
||||
proc start*(self: ValidatorInteractions) {.async.} =
|
||||
await procCall ContractInteractions(self).start()
|
||||
await self.validation.start()
|
||||
|
||||
proc stop*(self: ValidatorInteractions) {.async.} =
|
||||
await self.validation.stop()
|
||||
await procCall ContractInteractions(self).stop()
|
|
@ -1,99 +1,282 @@
|
|||
import std/sequtils
|
||||
import std/strutils
|
||||
import std/sugar
|
||||
import pkg/ethers
|
||||
import pkg/ethers/testing
|
||||
import pkg/upraises
|
||||
import pkg/questionable
|
||||
import ../utils/exceptions
|
||||
import ../logutils
|
||||
import ../market
|
||||
import ./storage
|
||||
import ./marketplace
|
||||
import ./proofs
|
||||
|
||||
export market
|
||||
|
||||
logScope:
|
||||
topics = "marketplace onchain market"
|
||||
|
||||
type
|
||||
OnChainMarket* = ref object of Market
|
||||
contract: Storage
|
||||
contract: Marketplace
|
||||
signer: Signer
|
||||
MarketSubscription = market.Subscription
|
||||
EventSubscription = ethers.Subscription
|
||||
OnChainMarketSubscription = ref object of MarketSubscription
|
||||
eventSubscription: EventSubscription
|
||||
|
||||
func new*(_: type OnChainMarket, contract: Storage): OnChainMarket =
|
||||
func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket =
|
||||
without signer =? contract.signer:
|
||||
raiseAssert("Storage contract should have a signer")
|
||||
raiseAssert("Marketplace contract should have a signer")
|
||||
OnChainMarket(
|
||||
contract: contract,
|
||||
signer: signer,
|
||||
)
|
||||
|
||||
proc raiseMarketError(message: string) {.raises: [MarketError].} =
|
||||
raise newException(MarketError, message)
|
||||
|
||||
template convertEthersError(body) =
|
||||
try:
|
||||
body
|
||||
except EthersError as error:
|
||||
raiseMarketError(error.msgDetail)
|
||||
|
||||
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
|
||||
debug "Approving tokens", amount
|
||||
convertEthersError:
|
||||
let tokenAddress = await market.contract.token()
|
||||
let token = Erc20Token.new(tokenAddress, market.signer)
|
||||
discard await token.increaseAllowance(market.contract.address(), amount).confirm(0)
|
||||
|
||||
method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
|
||||
let config = await market.contract.config()
|
||||
return some config.proofs.zkeyHash
|
||||
|
||||
method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
|
||||
return await market.signer.getAddress()
|
||||
convertEthersError:
|
||||
return await market.signer.getAddress()
|
||||
|
||||
method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
|
||||
convertEthersError:
|
||||
let config = await market.contract.config()
|
||||
let period = config.proofs.period
|
||||
return Periodicity(seconds: period)
|
||||
|
||||
method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} =
|
||||
convertEthersError:
|
||||
let config = await market.contract.config()
|
||||
return config.proofs.timeout
|
||||
|
||||
method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
|
||||
convertEthersError:
|
||||
let config = await market.contract.config()
|
||||
return config.proofs.downtime
|
||||
|
||||
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
|
||||
convertEthersError:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getPointer(slotId, overrides)
|
||||
|
||||
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
|
||||
return await market.contract.myRequests
|
||||
convertEthersError:
|
||||
return await market.contract.myRequests
|
||||
|
||||
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
|
||||
convertEthersError:
|
||||
let slots = await market.contract.mySlots()
|
||||
debug "Fetched my slots", numSlots=len(slots)
|
||||
|
||||
return slots
|
||||
|
||||
method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} =
|
||||
await market.contract.requestStorage(request)
|
||||
convertEthersError:
|
||||
debug "Requesting storage"
|
||||
await market.approveFunds(request.price())
|
||||
discard await market.contract.requestStorage(request).confirm(0)
|
||||
|
||||
method getRequest(market: OnChainMarket,
|
||||
id: RequestId): Future[?StorageRequest] {.async.} =
|
||||
try:
|
||||
return some await market.contract.getRequest(id)
|
||||
except ProviderError as e:
|
||||
if e.revertReason.contains("Unknown request"):
|
||||
return none StorageRequest
|
||||
raise e
|
||||
convertEthersError:
|
||||
try:
|
||||
return some await market.contract.getRequest(id)
|
||||
except ProviderError as e:
|
||||
if e.msgDetail.contains("Unknown request"):
|
||||
return none StorageRequest
|
||||
raise e
|
||||
|
||||
method getState*(market: OnChainMarket,
|
||||
requestId: RequestId): Future[?RequestState] {.async.} =
|
||||
try:
|
||||
return some await market.contract.state(requestId)
|
||||
except ProviderError as e:
|
||||
if e.revertReason.contains("Unknown request"):
|
||||
return none RequestState
|
||||
raise e
|
||||
method requestState*(market: OnChainMarket,
|
||||
requestId: RequestId): Future[?RequestState] {.async.} =
|
||||
convertEthersError:
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return some await market.contract.requestState(requestId, overrides)
|
||||
except ProviderError as e:
|
||||
if e.msgDetail.contains("Unknown request"):
|
||||
return none RequestState
|
||||
raise e
|
||||
|
||||
method slotState*(market: OnChainMarket,
|
||||
slotId: SlotId): Future[SlotState] {.async.} =
|
||||
convertEthersError:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.slotState(slotId, overrides)
|
||||
|
||||
method getRequestEnd*(market: OnChainMarket,
|
||||
id: RequestId): Future[SecondsSince1970] {.async.} =
|
||||
return await market.contract.requestEnd(id)
|
||||
convertEthersError:
|
||||
return await market.contract.requestEnd(id)
|
||||
|
||||
method requestExpiresAt*(market: OnChainMarket,
|
||||
id: RequestId): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError:
|
||||
return await market.contract.requestExpiry(id)
|
||||
|
||||
method getHost(market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256): Future[?Address] {.async.} =
|
||||
let slotId = slotId(requestId, slotIndex)
|
||||
let address = await market.contract.getHost(slotId)
|
||||
if address != Address.default:
|
||||
return some address
|
||||
else:
|
||||
return none Address
|
||||
convertEthersError:
|
||||
let slotId = slotId(requestId, slotIndex)
|
||||
let address = await market.contract.getHost(slotId)
|
||||
if address != Address.default:
|
||||
return some address
|
||||
else:
|
||||
return none Address
|
||||
|
||||
method getActiveSlot*(market: OnChainMarket,
|
||||
slotId: SlotId): Future[?Slot] {.async.} =
|
||||
convertEthersError:
|
||||
try:
|
||||
return some await market.contract.getActiveSlot(slotId)
|
||||
except ProviderError as e:
|
||||
if e.msgDetail.contains("Slot is free"):
|
||||
return none Slot
|
||||
raise e
|
||||
|
||||
method fillSlot(market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256,
|
||||
proof: seq[byte]) {.async.} =
|
||||
await market.contract.fillSlot(requestId, slotIndex, proof)
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256) {.async.} =
|
||||
convertEthersError:
|
||||
await market.approveFunds(collateral)
|
||||
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(0)
|
||||
|
||||
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
|
||||
convertEthersError:
|
||||
discard await market.contract.freeSlot(slotId).confirm(0)
|
||||
|
||||
method withdrawFunds(market: OnChainMarket,
|
||||
requestId: RequestId) {.async.} =
|
||||
await market.contract.withdrawFunds(requestId)
|
||||
convertEthersError:
|
||||
discard await market.contract.withdrawFunds(requestId).confirm(0)
|
||||
|
||||
method subscribeRequests(market: OnChainMarket,
|
||||
method isProofRequired*(market: OnChainMarket,
|
||||
id: SlotId): Future[bool] {.async.} =
|
||||
convertEthersError:
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.isProofRequired(id, overrides)
|
||||
except ProviderError as e:
|
||||
if e.msgDetail.contains("Slot is free"):
|
||||
return false
|
||||
raise e
|
||||
|
||||
method willProofBeRequired*(market: OnChainMarket,
|
||||
id: SlotId): Future[bool] {.async.} =
|
||||
convertEthersError:
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.willProofBeRequired(id, overrides)
|
||||
except ProviderError as e:
|
||||
if e.msgDetail.contains("Slot is free"):
|
||||
return false
|
||||
raise e
|
||||
|
||||
method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} =
|
||||
convertEthersError:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getChallenge(id, overrides)
|
||||
|
||||
method submitProof*(market: OnChainMarket,
|
||||
id: SlotId,
|
||||
proof: Groth16Proof) {.async.} =
|
||||
convertEthersError:
|
||||
discard await market.contract.submitProof(id, proof).confirm(0)
|
||||
|
||||
method markProofAsMissing*(market: OnChainMarket,
|
||||
id: SlotId,
|
||||
period: Period) {.async.} =
|
||||
convertEthersError:
|
||||
discard await market.contract.markProofAsMissing(id, period).confirm(0)
|
||||
|
||||
method canProofBeMarkedAsMissing*(
|
||||
market: OnChainMarket,
|
||||
id: SlotId,
|
||||
period: Period
|
||||
): Future[bool] {.async.} =
|
||||
let provider = market.contract.provider
|
||||
let contractWithoutSigner = market.contract.connect(provider)
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
try:
|
||||
discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
|
||||
return true
|
||||
except EthersError as e:
|
||||
trace "Proof cannot be marked as missing", msg = e.msg
|
||||
return false
|
||||
|
||||
method subscribeRequests*(market: OnChainMarket,
|
||||
callback: OnRequest):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: StorageRequested) {.upraises:[].} =
|
||||
callback(event.requestId, event.ask)
|
||||
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
callback(event.requestId,
|
||||
event.ask,
|
||||
event.expiry)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeSlotFilled*(market: OnChainMarket,
|
||||
callback: OnSlotFilled):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: SlotFilled) {.upraises:[].} =
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeSlotFilled*(market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256,
|
||||
callback: OnSlotFilled):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: SlotFilled) {.upraises:[].} =
|
||||
if event.requestId == requestId and event.slotIndex == slotIndex:
|
||||
callback(event.requestId, event.slotIndex)
|
||||
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) =
|
||||
if eventRequestId == requestId and eventSlotIndex == slotIndex:
|
||||
callback(requestId, slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
return await market.subscribeSlotFilled(onSlotFilled)
|
||||
|
||||
method subscribeSlotFreed*(market: OnChainMarket,
|
||||
callback: OnSlotFreed):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: SlotFreed) {.upraises:[].} =
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeFulfillment(market: OnChainMarket,
|
||||
callback: OnFulfillment):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: RequestFulfilled) {.upraises:[].} =
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeFulfillment(market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
|
@ -102,8 +285,20 @@ method subscribeFulfillment(market: OnChainMarket,
|
|||
proc onEvent(event: RequestFulfilled) {.upraises:[].} =
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeRequestCancelled*(market: OnChainMarket,
|
||||
callback: OnRequestCancelled):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: RequestCancelled) {.upraises:[].} =
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeRequestCancelled*(market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
|
@ -112,18 +307,63 @@ method subscribeRequestCancelled*(market: OnChainMarket,
|
|||
proc onEvent(event: RequestCancelled) {.upraises:[].} =
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeRequestFailed*(market: OnChainMarket,
|
||||
callback: OnRequestFailed):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: RequestFailed) {.upraises:[]} =
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeRequestFailed*(market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
callback: OnRequestFailed):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: RequestFailed) {.upraises:[].} =
|
||||
proc onEvent(event: RequestFailed) {.upraises:[]} =
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeProofSubmission*(market: OnChainMarket,
|
||||
callback: OnProofSubmitted):
|
||||
Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(event: ProofSubmitted) {.upraises: [].} =
|
||||
callback(event.id)
|
||||
|
||||
convertEthersError:
|
||||
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
|
||||
await subscription.eventSubscription.unsubscribe()
|
||||
|
||||
method queryPastStorageRequests*(market: OnChainMarket,
|
||||
blocksAgo: int):
|
||||
Future[seq[PastStorageRequest]] {.async.} =
|
||||
convertEthersError:
|
||||
let contract = market.contract
|
||||
let provider = contract.provider
|
||||
|
||||
let head = await provider.getBlockNumber()
|
||||
let fromBlock = BlockTag.init(head - blocksAgo.abs.u256)
|
||||
|
||||
let events = await contract.queryFilter(StorageRequested,
|
||||
fromBlock,
|
||||
BlockTag.latest)
|
||||
return events.map(event =>
|
||||
PastStorageRequest(
|
||||
requestId: event.requestId,
|
||||
ask: event.ask,
|
||||
expiry: event.expiry
|
||||
)
|
||||
)
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
import pkg/ethers
|
||||
import pkg/ethers/erc20
|
||||
import pkg/json_rpc/rpcclient
|
||||
import pkg/stint
|
||||
import pkg/chronos
|
||||
import ../clock
|
||||
import ./requests
|
||||
import ./proofs
|
||||
import ./config
|
||||
|
||||
export stint
|
||||
export ethers except `%`, `%*`, toJson
|
||||
export erc20 except `%`, `%*`, toJson
|
||||
export config
|
||||
export requests
|
||||
|
||||
type
|
||||
Marketplace* = ref object of Contract
|
||||
StorageRequested* = object of Event
|
||||
requestId*: RequestId
|
||||
ask*: StorageAsk
|
||||
expiry*: UInt256
|
||||
SlotFilled* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
slotIndex*: UInt256
|
||||
SlotFreed* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
slotIndex*: UInt256
|
||||
RequestFulfilled* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
RequestCancelled* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
RequestFailed* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
ProofSubmitted* = object of Event
|
||||
id*: SlotId
|
||||
|
||||
|
||||
proc config*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
|
||||
proc token*(marketplace: Marketplace): Address {.contract, view.}
|
||||
proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
|
||||
proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
|
||||
proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}
|
||||
|
||||
proc requestStorage*(marketplace: Marketplace, request: StorageRequest): ?TransactionResponse {.contract.}
|
||||
proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): ?TransactionResponse {.contract.}
|
||||
proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): ?TransactionResponse {.contract.}
|
||||
proc freeSlot*(marketplace: Marketplace, id: SlotId): ?TransactionResponse {.contract.}
|
||||
proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.}
|
||||
proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
|
||||
proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.}
|
||||
|
||||
proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
|
||||
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
|
||||
proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.}
|
||||
proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
|
||||
proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
|
||||
proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
|
||||
|
||||
proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}
|
||||
|
||||
proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
|
||||
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
|
||||
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
|
||||
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
|
||||
proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.}
|
||||
proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
|
||||
|
||||
proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): ?TransactionResponse {.contract.}
|
||||
proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): ?TransactionResponse {.contract.}
|
|
@ -1,68 +1,43 @@
|
|||
import std/strutils
|
||||
import pkg/ethers
|
||||
import pkg/ethers/testing
|
||||
import ../storageproofs/timing/proofs
|
||||
import ./storage
|
||||
|
||||
export proofs
|
||||
import pkg/stint
|
||||
import pkg/contractabi
|
||||
import pkg/ethers/fields
|
||||
|
||||
type
|
||||
OnChainProofs* = ref object of Proofs
|
||||
storage: Storage
|
||||
pollInterval*: Duration
|
||||
ProofsSubscription = proofs.Subscription
|
||||
EventSubscription = ethers.Subscription
|
||||
OnChainProofsSubscription = ref object of ProofsSubscription
|
||||
eventSubscription: EventSubscription
|
||||
Groth16Proof* = object
|
||||
a*: G1Point
|
||||
b*: G2Point
|
||||
c*: G1Point
|
||||
G1Point* = object
|
||||
x*: UInt256
|
||||
y*: UInt256
|
||||
# A field element F_{p^2} encoded as `real + i * imag`
|
||||
Fp2Element* = object
|
||||
real*: UInt256
|
||||
imag*: UInt256
|
||||
G2Point* = object
|
||||
x*: Fp2Element
|
||||
y*: Fp2Element
|
||||
|
||||
const DefaultPollInterval = 3.seconds
|
||||
func solidityType*(_: type G1Point): string =
|
||||
solidityType(G1Point.fieldTypes)
|
||||
|
||||
proc new*(_: type OnChainProofs, storage: Storage): OnChainProofs =
|
||||
OnChainProofs(storage: storage, pollInterval: DefaultPollInterval)
|
||||
func solidityType*(_: type Fp2Element): string =
|
||||
solidityType(Fp2Element.fieldTypes)
|
||||
|
||||
method periodicity*(proofs: OnChainProofs): Future[Periodicity] {.async.} =
|
||||
let period = await proofs.storage.proofPeriod()
|
||||
return Periodicity(seconds: period)
|
||||
func solidityType*(_: type G2Point): string =
|
||||
solidityType(G2Point.fieldTypes)
|
||||
|
||||
method isProofRequired*(proofs: OnChainProofs,
|
||||
id: SlotId): Future[bool] {.async.} =
|
||||
try:
|
||||
return await proofs.storage.isProofRequired(id)
|
||||
except ProviderError as e:
|
||||
if e.revertReason.contains("Slot empty"):
|
||||
return false
|
||||
raise e
|
||||
func solidityType*(_: type Groth16Proof): string =
|
||||
solidityType(Groth16Proof.fieldTypes)
|
||||
|
||||
method willProofBeRequired*(proofs: OnChainProofs,
|
||||
id: SlotId): Future[bool] {.async.} =
|
||||
try:
|
||||
return await proofs.storage.willProofBeRequired(id)
|
||||
except ProviderError as e:
|
||||
if e.revertReason.contains("Slot empty"):
|
||||
return false
|
||||
raise e
|
||||
func encode*(encoder: var AbiEncoder, point: G1Point) =
|
||||
encoder.write(point.fieldValues)
|
||||
|
||||
method getProofEnd*(proofs: OnChainProofs,
|
||||
id: SlotId): Future[UInt256] {.async.} =
|
||||
try:
|
||||
return await proofs.storage.proofEnd(id)
|
||||
except ProviderError as e:
|
||||
if e.revertReason.contains("Slot empty"):
|
||||
return 0.u256
|
||||
raise e
|
||||
func encode*(encoder: var AbiEncoder, element: Fp2Element) =
|
||||
encoder.write(element.fieldValues)
|
||||
|
||||
method submitProof*(proofs: OnChainProofs,
|
||||
id: SlotId,
|
||||
proof: seq[byte]) {.async.} =
|
||||
await proofs.storage.submitProof(id, proof)
|
||||
func encode*(encoder: var AbiEncoder, point: G2Point) =
|
||||
encoder.write(point.fieldValues)
|
||||
|
||||
method subscribeProofSubmission*(proofs: OnChainProofs,
|
||||
callback: OnProofSubmitted):
|
||||
Future[ProofsSubscription] {.async.} =
|
||||
proc onEvent(event: ProofSubmitted) {.upraises: [].} =
|
||||
callback(event.id, event.proof)
|
||||
let subscription = await proofs.storage.subscribe(ProofSubmitted, onEvent)
|
||||
return OnChainProofsSubscription(eventSubscription: subscription)
|
||||
|
||||
method unsubscribe*(subscription: OnChainProofsSubscription) {.async, upraises:[].} =
|
||||
await subscription.eventSubscription.unsubscribe()
|
||||
func encode*(encoder: var AbiEncoder, proof: Groth16Proof) =
|
||||
encoder.write(proof.fieldValues)
|
||||
|
|
|
@ -1,35 +1,38 @@
|
|||
import std/hashes
|
||||
import std/sequtils
|
||||
import std/typetraits
|
||||
import pkg/contractabi
|
||||
import pkg/nimcrypto
|
||||
import pkg/ethers/fields
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/byteutils
|
||||
import pkg/upraises
|
||||
import ../logutils
|
||||
import ../utils/json
|
||||
|
||||
export contractabi
|
||||
|
||||
type
|
||||
StorageRequest* = object
|
||||
client*: Address
|
||||
ask*: StorageAsk
|
||||
content*: StorageContent
|
||||
expiry*: UInt256
|
||||
client* {.serialize.}: Address
|
||||
ask* {.serialize.}: StorageAsk
|
||||
content* {.serialize.}: StorageContent
|
||||
expiry* {.serialize.}: UInt256
|
||||
nonce*: Nonce
|
||||
StorageAsk* = object
|
||||
slots*: uint64
|
||||
slotSize*: UInt256
|
||||
duration*: UInt256
|
||||
proofProbability*: UInt256
|
||||
reward*: UInt256
|
||||
maxSlotLoss*: uint64
|
||||
slots* {.serialize.}: uint64
|
||||
slotSize* {.serialize.}: UInt256
|
||||
duration* {.serialize.}: UInt256
|
||||
proofProbability* {.serialize.}: UInt256
|
||||
reward* {.serialize.}: UInt256
|
||||
collateral* {.serialize.}: UInt256
|
||||
maxSlotLoss* {.serialize.}: uint64
|
||||
StorageContent* = object
|
||||
cid*: string
|
||||
erasure*: StorageErasure
|
||||
por*: StoragePoR
|
||||
StorageErasure* = object
|
||||
totalChunks*: uint64
|
||||
StoragePoR* = object
|
||||
u*: seq[byte]
|
||||
publicKey*: seq[byte]
|
||||
name*: seq[byte]
|
||||
cid* {.serialize.}: string
|
||||
merkleRoot*: array[32, byte]
|
||||
Slot* = object
|
||||
request* {.serialize.}: StorageRequest
|
||||
slotIndex* {.serialize.}: UInt256
|
||||
SlotId* = distinct array[32, byte]
|
||||
RequestId* = distinct array[32, byte]
|
||||
Nonce* = distinct array[32, byte]
|
||||
|
@ -39,11 +42,20 @@ type
|
|||
Cancelled
|
||||
Finished
|
||||
Failed
|
||||
SlotState* {.pure.} = enum
|
||||
Free
|
||||
Filled
|
||||
Finished
|
||||
Failed
|
||||
Paid
|
||||
Cancelled
|
||||
|
||||
proc `==`*(x, y: Nonce): bool {.borrow.}
|
||||
proc `==`*(x, y: RequestId): bool {.borrow.}
|
||||
proc `==`*(x, y: SlotId): bool {.borrow.}
|
||||
proc hash*(x: SlotId): Hash {.borrow.}
|
||||
proc hash*(x: Nonce): Hash {.borrow.}
|
||||
proc hash*(x: Address): Hash {.borrow.}
|
||||
|
||||
func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
|
||||
array[32, byte](id)
|
||||
|
@ -51,6 +63,30 @@ func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
|
|||
proc `$`*(id: RequestId | SlotId | Nonce): string =
|
||||
id.toArray.toHex
|
||||
|
||||
proc fromHex*(T: type RequestId, hex: string): T =
|
||||
T array[32, byte].fromHex(hex)
|
||||
|
||||
proc fromHex*(T: type SlotId, hex: string): T =
|
||||
T array[32, byte].fromHex(hex)
|
||||
|
||||
proc fromHex*(T: type Nonce, hex: string): T =
|
||||
T array[32, byte].fromHex(hex)
|
||||
|
||||
proc fromHex*[T: distinct](_: type T, hex: string): T =
|
||||
type baseType = T.distinctBase
|
||||
T baseType.fromHex(hex)
|
||||
|
||||
proc toHex*[T: distinct](id: T): string =
|
||||
type baseType = T.distinctBase
|
||||
baseType(id).toHex
|
||||
|
||||
logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog
|
||||
logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog
|
||||
logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog
|
||||
logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog
|
||||
logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog
|
||||
logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog
|
||||
|
||||
func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
|
||||
StorageRequest(
|
||||
client: tupl[0],
|
||||
|
@ -60,6 +96,12 @@ func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
|
|||
nonce: tupl[4]
|
||||
)
|
||||
|
||||
func fromTuple(_: type Slot, tupl: tuple): Slot =
|
||||
Slot(
|
||||
request: tupl[0],
|
||||
slotIndex: tupl[1]
|
||||
)
|
||||
|
||||
func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
|
||||
StorageAsk(
|
||||
slots: tupl[0],
|
||||
|
@ -67,34 +109,16 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
|
|||
duration: tupl[2],
|
||||
proofProbability: tupl[3],
|
||||
reward: tupl[4],
|
||||
maxSlotLoss: tupl[5]
|
||||
collateral: tupl[5],
|
||||
maxSlotLoss: tupl[6]
|
||||
)
|
||||
|
||||
func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
|
||||
StorageContent(
|
||||
cid: tupl[0],
|
||||
erasure: tupl[1],
|
||||
por: tupl[2]
|
||||
merkleRoot: tupl[1]
|
||||
)
|
||||
|
||||
func fromTuple(_: type StorageErasure, tupl: tuple): StorageErasure =
|
||||
StorageErasure(
|
||||
totalChunks: tupl[0]
|
||||
)
|
||||
|
||||
func fromTuple(_: type StoragePoR, tupl: tuple): StoragePoR =
|
||||
StoragePoR(
|
||||
u: tupl[0],
|
||||
publicKey: tupl[1],
|
||||
name: tupl[2]
|
||||
)
|
||||
|
||||
func solidityType*(_: type StoragePoR): string =
|
||||
solidityType(StoragePoR.fieldTypes)
|
||||
|
||||
func solidityType*(_: type StorageErasure): string =
|
||||
solidityType(StorageErasure.fieldTypes)
|
||||
|
||||
func solidityType*(_: type StorageContent): string =
|
||||
solidityType(StorageContent.fieldTypes)
|
||||
|
||||
|
@ -104,15 +128,6 @@ func solidityType*(_: type StorageAsk): string =
|
|||
func solidityType*(_: type StorageRequest): string =
|
||||
solidityType(StorageRequest.fieldTypes)
|
||||
|
||||
func solidityType*[T: RequestId | SlotId | Nonce](_: type T): string =
|
||||
solidityType(array[32, byte])
|
||||
|
||||
func encode*(encoder: var AbiEncoder, por: StoragePoR) =
|
||||
encoder.write(por.fieldValues)
|
||||
|
||||
func encode*(encoder: var AbiEncoder, erasure: StorageErasure) =
|
||||
encoder.write(erasure.fieldValues)
|
||||
|
||||
func encode*(encoder: var AbiEncoder, content: StorageContent) =
|
||||
encoder.write(content.fieldValues)
|
||||
|
||||
|
@ -125,18 +140,8 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
|
|||
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
|
||||
encoder.write(request.fieldValues)
|
||||
|
||||
func decode*[T: RequestId | SlotId | Nonce](decoder: var AbiDecoder,
|
||||
_: type T): ?!T =
|
||||
let nonce = ?decoder.read(type array[32, byte])
|
||||
success T(nonce)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type StoragePoR): ?!T =
|
||||
let tupl = ?decoder.read(StoragePoR.fieldTypes)
|
||||
success StoragePoR.fromTuple(tupl)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type StorageErasure): ?!T =
|
||||
let tupl = ?decoder.read(StorageErasure.fieldTypes)
|
||||
success StorageErasure.fromTuple(tupl)
|
||||
func encode*(encoder: var AbiEncoder, request: Slot) =
|
||||
encoder.write(request.fieldValues)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
|
||||
let tupl = ?decoder.read(StorageContent.fieldTypes)
|
||||
|
@ -150,6 +155,10 @@ func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T =
|
|||
let tupl = ?decoder.read(StorageRequest.fieldTypes)
|
||||
success StorageRequest.fromTuple(tupl)
|
||||
|
||||
func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
|
||||
let tupl = ?decoder.read(Slot.fieldTypes)
|
||||
success Slot.fromTuple(tupl)
|
||||
|
||||
func id*(request: StorageRequest): RequestId =
|
||||
let encoding = AbiEncoder.encode((request, ))
|
||||
RequestId(keccak256.digest(encoding).data)
|
||||
|
@ -161,6 +170,9 @@ func slotId*(requestId: RequestId, slot: UInt256): SlotId =
|
|||
func slotId*(request: StorageRequest, slot: UInt256): SlotId =
|
||||
slotId(request.id, slot)
|
||||
|
||||
func id*(slot: Slot): SlotId =
|
||||
slotId(slot.request, slot.slotIndex)
|
||||
|
||||
func pricePerSlot*(ask: StorageAsk): UInt256 =
|
||||
ask.duration * ask.reward
|
||||
|
||||
|
|
|
@ -1,62 +0,0 @@
|
|||
import pkg/ethers
|
||||
import pkg/json_rpc/rpcclient
|
||||
import pkg/stint
|
||||
import pkg/chronos
|
||||
import ../clock
|
||||
import ./requests
|
||||
|
||||
export stint
|
||||
export ethers
|
||||
|
||||
type
|
||||
Storage* = ref object of Contract
|
||||
StorageRequested* = object of Event
|
||||
requestId*: RequestId
|
||||
ask*: StorageAsk
|
||||
SlotFilled* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
slotIndex* {.indexed.}: UInt256
|
||||
slotId*: SlotId
|
||||
RequestFulfilled* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
RequestCancelled* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
RequestFailed* = object of Event
|
||||
requestId* {.indexed.}: RequestId
|
||||
ProofSubmitted* = object of Event
|
||||
id*: SlotId
|
||||
proof*: seq[byte]
|
||||
|
||||
|
||||
proc collateralAmount*(storage: Storage): UInt256 {.contract, view.}
|
||||
proc slashMisses*(storage: Storage): UInt256 {.contract, view.}
|
||||
proc slashPercentage*(storage: Storage): UInt256 {.contract, view.}
|
||||
proc minCollateralThreshold*(storage: Storage): UInt256 {.contract, view.}
|
||||
|
||||
proc deposit*(storage: Storage, amount: UInt256) {.contract.}
|
||||
proc withdraw*(storage: Storage) {.contract.}
|
||||
proc balanceOf*(storage: Storage, account: Address): UInt256 {.contract, view.}
|
||||
|
||||
proc requestStorage*(storage: Storage, request: StorageRequest) {.contract.}
|
||||
proc fillSlot*(storage: Storage, requestId: RequestId, slotIndex: UInt256, proof: seq[byte]) {.contract.}
|
||||
proc withdrawFunds*(storage: Storage, requestId: RequestId) {.contract.}
|
||||
proc payoutSlot*(storage: Storage, requestId: RequestId, slotIndex: UInt256) {.contract.}
|
||||
proc getRequest*(storage: Storage, id: RequestId): StorageRequest {.contract, view.}
|
||||
proc getHost*(storage: Storage, id: SlotId): Address {.contract, view.}
|
||||
|
||||
proc myRequests*(storage: Storage): seq[RequestId] {.contract, view.}
|
||||
proc state*(storage: Storage, requestId: RequestId): RequestState {.contract, view.}
|
||||
proc requestEnd*(storage: Storage, requestId: RequestId): SecondsSince1970 {.contract, view.}
|
||||
|
||||
proc proofPeriod*(storage: Storage): UInt256 {.contract, view.}
|
||||
proc proofTimeout*(storage: Storage): UInt256 {.contract, view.}
|
||||
|
||||
proc proofEnd*(storage: Storage, id: SlotId): UInt256 {.contract, view.}
|
||||
proc missingProofs*(storage: Storage, id: SlotId): UInt256 {.contract, view.}
|
||||
proc isProofRequired*(storage: Storage, id: SlotId): bool {.contract, view.}
|
||||
proc willProofBeRequired*(storage: Storage, id: SlotId): bool {.contract, view.}
|
||||
proc getChallenge*(storage: Storage, id: SlotId): array[32, byte] {.contract, view.}
|
||||
proc getPointer*(storage: Storage, id: SlotId): uint8 {.contract, view.}
|
||||
|
||||
proc submitProof*(storage: Storage, id: SlotId, proof: seq[byte]) {.contract.}
|
||||
proc markProofAsMissing*(storage: Storage, id: SlotId, period: UInt256) {.contract.}
|
|
@ -1,10 +0,0 @@
|
|||
import pkg/chronos
|
||||
import pkg/stint
|
||||
import pkg/ethers
|
||||
|
||||
type
|
||||
TestToken* = ref object of Contract
|
||||
|
||||
proc mint*(token: TestToken, holder: Address, amount: UInt256) {.contract.}
|
||||
proc approve*(token: TestToken, spender: Address, amount: UInt256) {.contract.}
|
||||
proc balanceOf*(token: TestToken, account: Address): UInt256 {.contract, view.}
|
|
@ -8,21 +8,19 @@
|
|||
## those terms.
|
||||
|
||||
import std/algorithm
|
||||
import std/sequtils
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
import pkg/libp2p/routing_record
|
||||
import pkg/libp2p/signed_envelope
|
||||
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/shims/net
|
||||
import pkg/contractabi/address as ca
|
||||
import pkg/libp2pdht/discv5/protocol as discv5
|
||||
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
|
||||
|
||||
import ./rng
|
||||
import ./errors
|
||||
import ./formats
|
||||
import ./logutils
|
||||
|
||||
export discv5
|
||||
|
||||
|
@ -35,10 +33,10 @@ logScope:
|
|||
|
||||
type
|
||||
Discovery* = ref object of RootObj
|
||||
protocol: discv5.Protocol # dht protocol
|
||||
protocol*: discv5.Protocol # dht protocol
|
||||
key: PrivateKey # private key
|
||||
peerId: PeerId # the peer id of the local node
|
||||
announceAddrs: seq[MultiAddress] # addresses announced as part of the provider records
|
||||
announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
|
||||
providerRecord*: ?SignedPeerRecord # record to advertice node connection information, this carry any
|
||||
# address that the node can be connected on
|
||||
dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information
|
||||
|
@ -57,7 +55,10 @@ proc toNodeId*(host: ca.Address): NodeId =
|
|||
|
||||
proc findPeer*(
|
||||
d: Discovery,
|
||||
peerId: PeerID): Future[?PeerRecord] {.async.} =
|
||||
peerId: PeerId): Future[?PeerRecord] {.async.} =
|
||||
trace "protocol.resolve..."
|
||||
## Find peer using the given Discovery object
|
||||
##
|
||||
let
|
||||
node = await d.protocol.resolve(toNodeId(peerId))
|
||||
|
||||
|
@ -72,27 +73,22 @@ method find*(
|
|||
cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
|
||||
## Find block providers
|
||||
##
|
||||
|
||||
trace "Finding providers for block", cid
|
||||
without providers =?
|
||||
(await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
|
||||
trace "Error finding providers for block", cid, error = error.msg
|
||||
warn "Error finding providers for block", cid, error = error.msg
|
||||
|
||||
return providers
|
||||
return providers.filterIt( not (it.data.peerId == d.peerId) )
|
||||
|
||||
method provide*(d: Discovery, cid: Cid) {.async, base.} =
|
||||
## Provide a bock Cid
|
||||
## Provide a block Cid
|
||||
##
|
||||
|
||||
trace "Providing block", cid
|
||||
let
|
||||
nodes = await d.protocol.addProvider(
|
||||
cid.toNodeId(), d.providerRecord.get)
|
||||
|
||||
if nodes.len <= 0:
|
||||
trace "Couldn't provide to any nodes!"
|
||||
warn "Couldn't provide to any nodes!"
|
||||
|
||||
trace "Provided to nodes", nodes = nodes.len
|
||||
|
||||
method find*(
|
||||
d: Discovery,
|
||||
|
@ -126,7 +122,9 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =
|
|||
if nodes.len > 0:
|
||||
trace "Provided to nodes", nodes = nodes.len
|
||||
|
||||
method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base.} =
|
||||
method removeProvider*(
|
||||
d: Discovery,
|
||||
peerId: PeerId): Future[void] {.base.} =
|
||||
## Remove provider from providers table
|
||||
##
|
||||
|
||||
|
@ -160,6 +158,10 @@ proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) =
|
|||
IpTransportProtocol.udpProtocol,
|
||||
port)])).expect("Should construct signed record").some
|
||||
|
||||
if not d.protocol.isNil:
|
||||
d.protocol.updateRecord(d.dhtRecord)
|
||||
.expect("Should update SPR")
|
||||
|
||||
proc start*(d: Discovery) {.async.} =
|
||||
d.protocol.open()
|
||||
await d.protocol.start()
|
||||
|
@ -168,22 +170,36 @@ proc stop*(d: Discovery) {.async.} =
|
|||
await d.protocol.closeWait()
|
||||
|
||||
proc new*(
|
||||
T: type Discovery,
|
||||
key: PrivateKey,
|
||||
bindIp = ValidIpAddress.init(IPv4_any()),
|
||||
bindPort = 0.Port,
|
||||
announceAddrs: openArray[MultiAddress],
|
||||
bootstrapNodes: openArray[SignedPeerRecord] = [],
|
||||
store: Datastore = SQLiteDatastore.new(Memory)
|
||||
.expect("Should not fail!")): T =
|
||||
T: type Discovery,
|
||||
key: PrivateKey,
|
||||
bindIp = ValidIpAddress.init(IPv4_any()),
|
||||
bindPort = 0.Port,
|
||||
announceAddrs: openArray[MultiAddress],
|
||||
bootstrapNodes: openArray[SignedPeerRecord] = [],
|
||||
store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
|
||||
): Discovery =
|
||||
## Create a new Discovery node instance for the given key and datastore
|
||||
##
|
||||
|
||||
var
|
||||
self = T(
|
||||
self = Discovery(
|
||||
key: key,
|
||||
peerId: PeerId.init(key).expect("Should construct PeerId"))
|
||||
|
||||
self.updateAnnounceRecord(announceAddrs)
|
||||
|
||||
# --------------------------------------------------------------------------
|
||||
# FIXME disable IP limits temporarily so we can run our workshop. Re-enable
|
||||
# and figure out proper solution.
|
||||
let discoveryConfig = DiscoveryConfig(
|
||||
tableIpLimits: TableIpLimits(
|
||||
tableIpLimit: high(uint),
|
||||
bucketIpLimit:high(uint)
|
||||
),
|
||||
bitsPerHop: DefaultBitsPerHop
|
||||
)
|
||||
# --------------------------------------------------------------------------
|
||||
|
||||
self.protocol = newProtocol(
|
||||
key,
|
||||
bindIp = bindIp.toNormalIp,
|
||||
|
@ -191,6 +207,7 @@ proc new*(
|
|||
record = self.providerRecord.get,
|
||||
bootstrapRecords = bootstrapNodes,
|
||||
rng = Rng.instance(),
|
||||
providers = ProvidersManager.new(store))
|
||||
providers = ProvidersManager.new(store),
|
||||
config = discoveryConfig)
|
||||
|
||||
self
|
||||
|
|
|
@ -12,8 +12,14 @@ import ./erasure/backends/leopard
|
|||
|
||||
export erasure
|
||||
|
||||
func leoEncoderProvider*(size, buffers, parity: int): EncoderBackend {.raises: [Defect].} =
|
||||
func leoEncoderProvider*(
|
||||
size, buffers, parity: int
|
||||
): EncoderBackend {.raises: [Defect].} =
|
||||
## create new Leo Encoder
|
||||
LeoEncoderBackend.new(size, buffers, parity)
|
||||
|
||||
func leoDecoderProvider*(size, buffers, parity: int): DecoderBackend {.raises: [Defect].} =
|
||||
LeoDecoderBackend.new(size, buffers, parity)
|
||||
func leoDecoderProvider*(
|
||||
size, buffers, parity: int
|
||||
): DecoderBackend {.raises: [Defect].} =
|
||||
## create new Leo Decoder
|
||||
LeoDecoderBackend.new(size, buffers, parity)
|
||||
|
|
|
@ -0,0 +1,225 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2024 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import std/sequtils
|
||||
|
||||
import pkg/taskpools
|
||||
import pkg/taskpools/flowvars
|
||||
import pkg/chronos
|
||||
import pkg/chronos/threadsync
|
||||
import pkg/questionable/results
|
||||
|
||||
import ./backend
|
||||
import ../errors
|
||||
import ../logutils
|
||||
|
||||
logScope:
|
||||
topics = "codex asyncerasure"
|
||||
|
||||
const
|
||||
CompletitionTimeout = 1.seconds # Maximum await time for completition after receiving a signal
|
||||
CompletitionRetryDelay = 10.millis
|
||||
|
||||
type
|
||||
EncoderBackendPtr = ptr EncoderBackend
|
||||
DecoderBackendPtr = ptr DecoderBackend
|
||||
|
||||
# Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy
|
||||
EncodeTaskArgs = object
|
||||
signal: ThreadSignalPtr
|
||||
backend: EncoderBackendPtr
|
||||
blockSize: int
|
||||
ecM: int
|
||||
|
||||
DecodeTaskArgs = object
|
||||
signal: ThreadSignalPtr
|
||||
backend: DecoderBackendPtr
|
||||
blockSize: int
|
||||
ecK: int
|
||||
|
||||
SharedArrayHolder*[T] = object
|
||||
data: ptr UncheckedArray[T]
|
||||
size: int
|
||||
|
||||
EncodeTaskResult = Result[SharedArrayHolder[byte], cstring]
|
||||
DecodeTaskResult = Result[SharedArrayHolder[byte], cstring]
|
||||
|
||||
proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
|
||||
var
|
||||
data = data.unsafeAddr
|
||||
parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize))
|
||||
|
||||
try:
|
||||
let res = args.backend[].encode(data[], parity)
|
||||
|
||||
if res.isOk:
|
||||
let
|
||||
resDataSize = parity.len * args.blockSize
|
||||
resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
|
||||
arrHolder = SharedArrayHolder[byte](
|
||||
data: resData,
|
||||
size: resDataSize
|
||||
)
|
||||
|
||||
for i in 0..<parity.len:
|
||||
copyMem(addr resData[i * args.blockSize], addr parity[i][0], args.blockSize)
|
||||
|
||||
return ok(arrHolder)
|
||||
else:
|
||||
return err(res.error)
|
||||
except CatchableError as exception:
|
||||
return err(exception.msg.cstring)
|
||||
finally:
|
||||
if err =? args.signal.fireSync().mapFailure.errorOption():
|
||||
error "Error firing signal", msg = err.msg
|
||||
|
||||
proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]): DecodeTaskResult =
|
||||
var
|
||||
data = data.unsafeAddr
|
||||
parity = parity.unsafeAddr
|
||||
recovered = newSeqWith[seq[byte]](args.ecK, newSeq[byte](args.blockSize))
|
||||
|
||||
try:
|
||||
let res = args.backend[].decode(data[], parity[], recovered)
|
||||
|
||||
if res.isOk:
|
||||
let
|
||||
resDataSize = recovered.len * args.blockSize
|
||||
resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
|
||||
arrHolder = SharedArrayHolder[byte](
|
||||
data: resData,
|
||||
size: resDataSize
|
||||
)
|
||||
|
||||
for i in 0..<recovered.len:
|
||||
copyMem(addr resData[i * args.blockSize], addr recovered[i][0], args.blockSize)
|
||||
|
||||
return ok(arrHolder)
|
||||
else:
|
||||
return err(res.error)
|
||||
except CatchableError as exception:
|
||||
return err(exception.msg.cstring)
|
||||
finally:
|
||||
if err =? args.signal.fireSync().mapFailure.errorOption():
|
||||
error "Error firing signal", msg = err.msg
|
||||
|
||||
proc proxySpawnEncodeTask(
|
||||
tp: Taskpool,
|
||||
args: EncodeTaskArgs,
|
||||
data: ref seq[seq[byte]]
|
||||
): Flowvar[EncodeTaskResult] =
|
||||
# FIXME Uncomment the code below after addressing an issue:
|
||||
# https://github.com/codex-storage/nim-codex/issues/854
|
||||
|
||||
# tp.spawn encodeTask(args, data[])
|
||||
|
||||
let fv = EncodeTaskResult.newFlowVar
|
||||
fv.readyWith(encodeTask(args, data[]))
|
||||
return fv
|
||||
|
||||
proc proxySpawnDecodeTask(
|
||||
tp: Taskpool,
|
||||
args: DecodeTaskArgs,
|
||||
data: ref seq[seq[byte]],
|
||||
parity: ref seq[seq[byte]]
|
||||
): Flowvar[DecodeTaskResult] =
|
||||
# FIXME Uncomment the code below after addressing an issue:
|
||||
# https://github.com/codex-storage/nim-codex/issues/854
|
||||
|
||||
# tp.spawn decodeTask(args, data[], parity[])
|
||||
|
||||
let fv = DecodeTaskResult.newFlowVar
|
||||
fv.readyWith(decodeTask(args, data[], parity[]))
|
||||
return fv
|
||||
|
||||
proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
|
||||
await wait(signal)
|
||||
|
||||
var
|
||||
res: T
|
||||
awaitTotal: Duration
|
||||
while awaitTotal < CompletitionTimeout:
|
||||
if handle.tryComplete(res):
|
||||
return success(res)
|
||||
else:
|
||||
awaitTotal += CompletitionRetryDelay
|
||||
await sleepAsync(CompletitionRetryDelay)
|
||||
|
||||
return failure("Task signaled finish but didn't return any result within " & $CompletitionRetryDelay)
|
||||
|
||||
proc asyncEncode*(
|
||||
tp: Taskpool,
|
||||
backend: EncoderBackend,
|
||||
data: ref seq[seq[byte]],
|
||||
blockSize: int,
|
||||
ecM: int
|
||||
): Future[?!ref seq[seq[byte]]] {.async.} =
|
||||
without signal =? ThreadSignalPtr.new().mapFailure, err:
|
||||
return failure(err)
|
||||
|
||||
try:
|
||||
let
|
||||
blockSize = data[0].len
|
||||
args = EncodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM)
|
||||
handle = proxySpawnEncodeTask(tp, args, data)
|
||||
|
||||
without res =? await awaitResult(signal, handle), err:
|
||||
return failure(err)
|
||||
|
||||
if res.isOk:
|
||||
var parity = seq[seq[byte]].new()
|
||||
parity[].setLen(ecM)
|
||||
|
||||
for i in 0..<parity[].len:
|
||||
parity[i] = newSeq[byte](blockSize)
|
||||
copyMem(addr parity[i][0], addr res.value.data[i * blockSize], blockSize)
|
||||
|
||||
deallocShared(res.value.data)
|
||||
|
||||
return success(parity)
|
||||
else:
|
||||
return failure($res.error)
|
||||
finally:
|
||||
if err =? signal.close().mapFailure.errorOption():
|
||||
error "Error closing signal", msg = $err.msg
|
||||
|
||||
proc asyncDecode*(
|
||||
tp: Taskpool,
|
||||
backend: DecoderBackend,
|
||||
data, parity: ref seq[seq[byte]],
|
||||
blockSize: int
|
||||
): Future[?!ref seq[seq[byte]]] {.async.} =
|
||||
without signal =? ThreadSignalPtr.new().mapFailure, err:
|
||||
return failure(err)
|
||||
|
||||
try:
|
||||
let
|
||||
ecK = data[].len
|
||||
args = DecodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK)
|
||||
handle = proxySpawnDecodeTask(tp, args, data, parity)
|
||||
|
||||
without res =? await awaitResult(signal, handle), err:
|
||||
return failure(err)
|
||||
|
||||
if res.isOk:
|
||||
var recovered = seq[seq[byte]].new()
|
||||
recovered[].setLen(ecK)
|
||||
|
||||
for i in 0..<recovered[].len:
|
||||
recovered[i] = newSeq[byte](blockSize)
|
||||
copyMem(addr recovered[i][0], addr res.value.data[i * blockSize], blockSize)
|
||||
|
||||
deallocShared(res.value.data)
|
||||
|
||||
return success(recovered)
|
||||
else:
|
||||
return failure($res.error)
|
||||
finally:
|
||||
if err =? signal.close().mapFailure.errorOption():
|
||||
error "Error closing signal", msg = $err.msg
|
|
@ -11,30 +11,37 @@ import pkg/upraises
|
|||
|
||||
push: {.upraises: [].}
|
||||
|
||||
import ../manifest
|
||||
import ../stores
|
||||
|
||||
type
|
||||
Backend* = ref object of RootObj
|
||||
ErasureBackend* = ref object of RootObj
|
||||
blockSize*: int # block size in bytes
|
||||
buffers*: int # number of original pieces
|
||||
parity*: int # number of redundancy pieces
|
||||
|
||||
EncoderBackend* = ref object of Backend
|
||||
DecoderBackend* = ref object of Backend
|
||||
EncoderBackend* = ref object of ErasureBackend
|
||||
DecoderBackend* = ref object of ErasureBackend
|
||||
|
||||
method release*(self: Backend) {.base.} =
|
||||
method release*(self: ErasureBackend) {.base.} =
|
||||
## release the backend
|
||||
##
|
||||
raiseAssert("not implemented!")
|
||||
|
||||
method encode*(
|
||||
self: EncoderBackend,
|
||||
buffers,
|
||||
parity: var openArray[seq[byte]]): Result[void, cstring] {.base.} =
|
||||
self: EncoderBackend,
|
||||
buffers,
|
||||
parity: var openArray[seq[byte]]
|
||||
): Result[void, cstring] {.base.} =
|
||||
## encode buffers using a backend
|
||||
##
|
||||
raiseAssert("not implemented!")
|
||||
|
||||
method decode*(
|
||||
self: DecoderBackend,
|
||||
buffers,
|
||||
parity,
|
||||
recovered: var openArray[seq[byte]]): Result[void, cstring] {.base.} =
|
||||
self: DecoderBackend,
|
||||
buffers,
|
||||
parity,
|
||||
recovered: var openArray[seq[byte]]
|
||||
): Result[void, cstring] {.base.} =
|
||||
## decode buffers using a backend
|
||||
##
|
||||
raiseAssert("not implemented!")
|
||||
|
|
|
@ -25,6 +25,7 @@ method encode*(
|
|||
self: LeoEncoderBackend,
|
||||
data,
|
||||
parity: var openArray[seq[byte]]): Result[void, cstring] =
|
||||
## Encode data using Leopard backend
|
||||
|
||||
if parity.len == 0:
|
||||
return ok()
|
||||
|
@ -45,8 +46,10 @@ method decode*(
|
|||
data,
|
||||
parity,
|
||||
recovered: var openArray[seq[byte]]): Result[void, cstring] =
|
||||
## Decode data using given Leopard backend
|
||||
|
||||
var decoder = if self.decoder.isNone:
|
||||
var decoder =
|
||||
if self.decoder.isNone:
|
||||
self.decoder = (? LeoDecoder.init(
|
||||
self.blockSize,
|
||||
self.buffers,
|
||||
|
@ -65,22 +68,26 @@ method release*(self: LeoDecoderBackend) =
|
|||
if self.decoder.isSome:
|
||||
self.decoder.get().free()
|
||||
|
||||
func new*(
|
||||
proc new*(
|
||||
T: type LeoEncoderBackend,
|
||||
blockSize,
|
||||
buffers,
|
||||
parity: int): T =
|
||||
T(
|
||||
parity: int): LeoEncoderBackend =
|
||||
## Create an instance of an Leopard Encoder backend
|
||||
##
|
||||
LeoEncoderBackend(
|
||||
blockSize: blockSize,
|
||||
buffers: buffers,
|
||||
parity: parity)
|
||||
|
||||
func new*(
|
||||
proc new*(
|
||||
T: type LeoDecoderBackend,
|
||||
blockSize,
|
||||
buffers,
|
||||
parity: int): T =
|
||||
T(
|
||||
parity: int): LeoDecoderBackend =
|
||||
## Create an instance of an Leopard Decoder backend
|
||||
##
|
||||
LeoDecoderBackend(
|
||||
blockSize: blockSize,
|
||||
buffers: buffers,
|
||||
parity: parity)
|
||||
|
|
|
@ -12,16 +12,27 @@ import pkg/upraises
|
|||
push: {.upraises: [].}
|
||||
|
||||
import std/sequtils
|
||||
import std/sugar
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p/[multicodec, cid, multihash]
|
||||
import pkg/libp2p/protobuf/minprotobuf
|
||||
import pkg/taskpools
|
||||
|
||||
import ../logutils
|
||||
import ../manifest
|
||||
import ../merkletree
|
||||
import ../stores
|
||||
import ../errors
|
||||
import ../blocktype as bt
|
||||
import ../utils
|
||||
import ../utils/asynciter
|
||||
import ../indexingstrategy
|
||||
import ../errors
|
||||
|
||||
import pkg/stew/byteutils
|
||||
|
||||
import ./backend
|
||||
import ./asyncbackend
|
||||
|
||||
export backend
|
||||
|
||||
|
@ -62,86 +73,275 @@ type
|
|||
encoderProvider*: EncoderProvider
|
||||
decoderProvider*: DecoderProvider
|
||||
store*: BlockStore
|
||||
taskpool: Taskpool
|
||||
|
||||
proc encode*(
|
||||
EncodingParams = object
|
||||
ecK: Natural
|
||||
ecM: Natural
|
||||
rounded: Natural
|
||||
steps: Natural
|
||||
blocksCount: Natural
|
||||
strategy: StrategyType
|
||||
|
||||
ErasureError* = object of CodexError
|
||||
InsufficientBlocksError* = object of ErasureError
|
||||
# Minimum size, in bytes, that the dataset must have had
|
||||
# for the encoding request to have succeeded with the parameters
|
||||
# provided.
|
||||
minSize*: NBytes
|
||||
|
||||
func indexToPos(steps, idx, step: int): int {.inline.} =
|
||||
## Convert an index to a position in the encoded
|
||||
## dataset
|
||||
## `idx` - the index to convert
|
||||
## `step` - the current step
|
||||
## `pos` - the position in the encoded dataset
|
||||
##
|
||||
|
||||
(idx - step) div steps
|
||||
|
||||
proc getPendingBlocks(
|
||||
self: Erasure,
|
||||
manifest: Manifest,
|
||||
blocks: int,
|
||||
parity: int): Future[?!Manifest] {.async.} =
|
||||
## Encode a manifest into one that is erasure protected.
|
||||
indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
|
||||
## Get pending blocks iterator
|
||||
##
|
||||
## `manifest` - the original manifest to be encoded
|
||||
## `blocks` - the number of blocks to be encoded - K
|
||||
## `parity` - the number of parity blocks to generate - M
|
||||
##
|
||||
|
||||
logScope:
|
||||
original_cid = manifest.cid.get()
|
||||
original_len = manifest.len
|
||||
blocks = blocks
|
||||
parity = parity
|
||||
|
||||
trace "Erasure coding manifest", blocks, parity
|
||||
without var encoded =? Manifest.new(manifest, blocks, parity), error:
|
||||
trace "Unable to create manifest", msg = error.msg
|
||||
return error.failure
|
||||
|
||||
logScope:
|
||||
steps = encoded.steps
|
||||
rounded_blocks = encoded.rounded
|
||||
new_manifest = encoded.len
|
||||
|
||||
var
|
||||
encoder = self.encoderProvider(manifest.blockSize, blocks, parity)
|
||||
# request blocks from the store
|
||||
pendingBlocks = indicies.map( (i: int) =>
|
||||
self.store.getBlock(
|
||||
BlockAddress.init(manifest.treeCid, i)
|
||||
).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
|
||||
)
|
||||
|
||||
proc isFinished(): bool = pendingBlocks.len == 0
|
||||
|
||||
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
|
||||
let completedFut = await one(pendingBlocks)
|
||||
if (let i = pendingBlocks.find(completedFut); i >= 0):
|
||||
pendingBlocks.del(i)
|
||||
return await completedFut
|
||||
else:
|
||||
let (_, index) = await completedFut
|
||||
raise newException(
|
||||
CatchableError,
|
||||
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
|
||||
|
||||
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
|
||||
|
||||
proc prepareEncodingData(
|
||||
self: Erasure,
|
||||
manifest: Manifest,
|
||||
params: EncodingParams,
|
||||
step: Natural,
|
||||
data: ref seq[seq[byte]],
|
||||
cids: ref seq[Cid],
|
||||
emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
|
||||
## Prepare data for encoding
|
||||
##
|
||||
|
||||
let
|
||||
strategy = params.strategy.init(
|
||||
firstIndex = 0,
|
||||
lastIndex = params.rounded - 1,
|
||||
iterations = params.steps
|
||||
)
|
||||
indicies = toSeq(strategy.getIndicies(step))
|
||||
pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
|
||||
|
||||
var resolved = 0
|
||||
for fut in pendingBlocksIter:
|
||||
let (blkOrErr, idx) = await fut
|
||||
without blk =? blkOrErr, err:
|
||||
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
|
||||
continue
|
||||
|
||||
let pos = indexToPos(params.steps, idx, step)
|
||||
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
|
||||
cids[idx] = blk.cid
|
||||
|
||||
resolved.inc()
|
||||
|
||||
for idx in indicies.filterIt(it >= manifest.blocksCount):
|
||||
let pos = indexToPos(params.steps, idx, step)
|
||||
trace "Padding with empty block", idx
|
||||
shallowCopy(data[pos], emptyBlock)
|
||||
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
|
||||
return failure(err)
|
||||
cids[idx] = emptyBlockCid
|
||||
|
||||
success(resolved.Natural)
|
||||
|
||||
proc prepareDecodingData(
|
||||
self: Erasure,
|
||||
encoded: Manifest,
|
||||
step: Natural,
|
||||
data: ref seq[seq[byte]],
|
||||
parityData: ref seq[seq[byte]],
|
||||
cids: ref seq[Cid],
|
||||
emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
|
||||
## Prepare data for decoding
|
||||
## `encoded` - the encoded manifest
|
||||
## `step` - the current step
|
||||
## `data` - the data to be prepared
|
||||
## `parityData` - the parityData to be prepared
|
||||
## `cids` - cids of prepared data
|
||||
## `emptyBlock` - the empty block to be used for padding
|
||||
##
|
||||
|
||||
let
|
||||
strategy = encoded.protectedStrategy.init(
|
||||
firstIndex = 0,
|
||||
lastIndex = encoded.blocksCount - 1,
|
||||
iterations = encoded.steps
|
||||
)
|
||||
indicies = toSeq(strategy.getIndicies(step))
|
||||
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
|
||||
|
||||
var
|
||||
dataPieces = 0
|
||||
parityPieces = 0
|
||||
resolved = 0
|
||||
for fut in pendingBlocksIter:
|
||||
# Continue to receive blocks until we have just enough for decoding
|
||||
# or no more blocks can arrive
|
||||
if resolved >= encoded.ecK:
|
||||
break
|
||||
|
||||
let (blkOrErr, idx) = await fut
|
||||
without blk =? blkOrErr, err:
|
||||
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
|
||||
continue
|
||||
|
||||
let
|
||||
pos = indexToPos(encoded.steps, idx, step)
|
||||
|
||||
logScope:
|
||||
cid = blk.cid
|
||||
idx = idx
|
||||
pos = pos
|
||||
step = step
|
||||
empty = blk.isEmpty
|
||||
|
||||
cids[idx] = blk.cid
|
||||
if idx >= encoded.rounded:
|
||||
trace "Retrieved parity block"
|
||||
shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
|
||||
parityPieces.inc
|
||||
else:
|
||||
trace "Retrieved data block"
|
||||
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
|
||||
dataPieces.inc
|
||||
|
||||
resolved.inc
|
||||
|
||||
return success (dataPieces.Natural, parityPieces.Natural)
|
||||
|
||||
proc init*(
|
||||
_: type EncodingParams,
|
||||
manifest: Manifest,
|
||||
ecK: Natural, ecM: Natural,
|
||||
strategy: StrategyType): ?!EncodingParams =
|
||||
if ecK > manifest.blocksCount:
|
||||
let exc = (ref InsufficientBlocksError)(
|
||||
msg: "Unable to encode manifest, not enough blocks, ecK = " &
|
||||
$ecK &
|
||||
", blocksCount = " &
|
||||
$manifest.blocksCount,
|
||||
minSize: ecK.NBytes * manifest.blockSize)
|
||||
return failure(exc)
|
||||
|
||||
let
|
||||
rounded = roundUp(manifest.blocksCount, ecK)
|
||||
steps = divUp(rounded, ecK)
|
||||
blocksCount = rounded + (steps * ecM)
|
||||
|
||||
success EncodingParams(
|
||||
ecK: ecK,
|
||||
ecM: ecM,
|
||||
rounded: rounded,
|
||||
steps: steps,
|
||||
blocksCount: blocksCount,
|
||||
strategy: strategy
|
||||
)
|
||||
|
||||
proc encodeData(
|
||||
self: Erasure,
|
||||
manifest: Manifest,
|
||||
params: EncodingParams
|
||||
): Future[?!Manifest] {.async.} =
|
||||
## Encode blocks pointed to by the protected manifest
|
||||
##
|
||||
## `manifest` - the manifest to encode
|
||||
##
|
||||
|
||||
logScope:
|
||||
steps = params.steps
|
||||
rounded_blocks = params.rounded
|
||||
blocks_count = params.blocksCount
|
||||
ecK = params.ecK
|
||||
ecM = params.ecM
|
||||
|
||||
var
|
||||
cids = seq[Cid].new()
|
||||
encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
|
||||
emptyBlock = newSeq[byte](manifest.blockSize.int)
|
||||
|
||||
cids[].setLen(params.blocksCount)
|
||||
|
||||
try:
|
||||
for i in 0..<encoded.steps:
|
||||
for step in 0..<params.steps:
|
||||
# TODO: Don't allocate a new seq every time, allocate once and zero out
|
||||
var
|
||||
data = newSeq[seq[byte]](blocks) # number of blocks to encode
|
||||
parityData = newSeqWith[seq[byte]](parity, newSeq[byte](manifest.blockSize))
|
||||
# calculate block indexes to retrieve
|
||||
blockIdx = toSeq(countup(i, encoded.rounded - 1, encoded.steps))
|
||||
# request all blocks from the store
|
||||
dataBlocks = await allFinished(
|
||||
blockIdx.mapIt( self.store.getBlock(encoded[it]) ))
|
||||
data = seq[seq[byte]].new() # number of blocks to encode
|
||||
|
||||
# TODO: this is a tight blocking loop so we sleep here to allow
|
||||
# other events to be processed, this should be addressed
|
||||
# by threading
|
||||
await sleepAsync(10.millis)
|
||||
data[].setLen(params.ecK)
|
||||
|
||||
for j in 0..<blocks:
|
||||
let idx = blockIdx[j]
|
||||
if idx < manifest.len:
|
||||
without blk =? (await dataBlocks[j]), error:
|
||||
trace "Unable to retrieve block", error = error.msg
|
||||
return failure error
|
||||
without resolved =?
|
||||
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
|
||||
trace "Unable to prepare data", error = err.msg
|
||||
return failure(err)
|
||||
|
||||
trace "Encoding block", cid = blk.cid, pos = idx
|
||||
shallowCopy(data[j], blk.data)
|
||||
else:
|
||||
trace "Padding with empty block", pos = idx
|
||||
data[j] = newSeq[byte](manifest.blockSize)
|
||||
trace "Erasure coding data", data = data[].len, parity = params.ecM
|
||||
|
||||
trace "Erasure coding data", data = data.len, parity = parityData.len
|
||||
without parity =? await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err:
|
||||
trace "Error encoding data", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
let res = encoder.encode(data, parityData);
|
||||
if res.isErr:
|
||||
trace "Unable to encode manifest!", error = $res.error
|
||||
return failure($res.error)
|
||||
|
||||
for j in 0..<parity:
|
||||
let idx = encoded.rounded + blockIdx[j]
|
||||
without blk =? bt.Block.new(parityData[j]), error:
|
||||
var idx = params.rounded + step
|
||||
for j in 0..<params.ecM:
|
||||
without blk =? bt.Block.new(parity[j]), error:
|
||||
trace "Unable to create parity block", err = error.msg
|
||||
return failure(error)
|
||||
|
||||
trace "Adding parity block", cid = blk.cid, pos = idx
|
||||
encoded[idx] = blk.cid
|
||||
trace "Adding parity block", cid = blk.cid, idx
|
||||
cids[idx] = blk.cid
|
||||
if isErr (await self.store.putBlock(blk)):
|
||||
trace "Unable to store block!", cid = blk.cid
|
||||
return failure("Unable to store block!")
|
||||
idx.inc(params.steps)
|
||||
|
||||
without tree =? CodexTree.init(cids[]), err:
|
||||
return failure(err)
|
||||
|
||||
without treeCid =? tree.rootCid, err:
|
||||
return failure(err)
|
||||
|
||||
if err =? (await self.store.putAllProofs(tree)).errorOption:
|
||||
return failure(err)
|
||||
|
||||
let encodedManifest = Manifest.new(
|
||||
manifest = manifest,
|
||||
treeCid = treeCid,
|
||||
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
|
||||
ecK = params.ecK,
|
||||
ecM = params.ecM,
|
||||
strategy = params.strategy
|
||||
)
|
||||
|
||||
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
|
||||
success encodedManifest
|
||||
except CancelledError as exc:
|
||||
trace "Erasure coding encoding cancelled"
|
||||
raise exc # cancellation needs to be propagated
|
||||
|
@ -151,7 +351,26 @@ proc encode*(
|
|||
finally:
|
||||
encoder.release()
|
||||
|
||||
return encoded.success
|
||||
proc encode*(
|
||||
self: Erasure,
|
||||
manifest: Manifest,
|
||||
blocks: Natural,
|
||||
parity: Natural,
|
||||
strategy = SteppedStrategy): Future[?!Manifest] {.async.} =
|
||||
## Encode a manifest into one that is erasure protected.
|
||||
##
|
||||
## `manifest` - the original manifest to be encoded
|
||||
## `blocks` - the number of blocks to be encoded - K
|
||||
## `parity` - the number of parity blocks to generate - M
|
||||
##
|
||||
|
||||
without params =? EncodingParams.init(manifest, blocks.int, parity.int, strategy), err:
|
||||
return failure(err)
|
||||
|
||||
without encodedManifest =? await self.encodeData(manifest, params), err:
|
||||
return failure(err)
|
||||
|
||||
return success encodedManifest
|
||||
|
||||
proc decode*(
|
||||
self: Erasure,
|
||||
|
@ -166,85 +385,53 @@ proc decode*(
|
|||
logScope:
|
||||
steps = encoded.steps
|
||||
rounded_blocks = encoded.rounded
|
||||
new_manifest = encoded.len
|
||||
new_manifest = encoded.blocksCount
|
||||
|
||||
var
|
||||
decoder = self.decoderProvider(encoded.blockSize, encoded.K, encoded.M)
|
||||
cids = seq[Cid].new()
|
||||
recoveredIndices = newSeq[Natural]()
|
||||
decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
|
||||
emptyBlock = newSeq[byte](encoded.blockSize.int)
|
||||
|
||||
cids[].setLen(encoded.blocksCount)
|
||||
try:
|
||||
for i in 0..<encoded.steps:
|
||||
# TODO: Don't allocate a new seq every time, allocate once and zero out
|
||||
let
|
||||
# calculate block indexes to retrieve
|
||||
blockIdx = toSeq(countup(i, encoded.len - 1, encoded.steps))
|
||||
# request all blocks from the store
|
||||
pendingBlocks = blockIdx.mapIt(
|
||||
self.store.getBlock(encoded[it]) # Get the data blocks (first K)
|
||||
)
|
||||
|
||||
# TODO: this is a tight blocking loop so we sleep here to allow
|
||||
# other events to be processed, this should be addressed
|
||||
# by threading
|
||||
await sleepAsync(10.millis)
|
||||
|
||||
for step in 0..<encoded.steps:
|
||||
var
|
||||
data = newSeq[seq[byte]](encoded.K) # number of blocks to encode
|
||||
parityData = newSeq[seq[byte]](encoded.M)
|
||||
recovered = newSeqWith[seq[byte]](encoded.K, newSeq[byte](encoded.blockSize))
|
||||
idxPendingBlocks = pendingBlocks # copy futures to make using with `one` easier
|
||||
emptyBlock = newSeq[byte](encoded.blockSize)
|
||||
resolved = 0
|
||||
data = seq[seq[byte]].new()
|
||||
parity = seq[seq[byte]].new()
|
||||
|
||||
while true:
|
||||
# Continue to receive blocks until we have just enough for decoding
|
||||
# or no more blocks can arrive
|
||||
if (resolved >= encoded.K) or (idxPendingBlocks.len == 0):
|
||||
break
|
||||
data[].setLen(encoded.ecK) # set len to K
|
||||
parity[].setLen(encoded.ecM) # set len to M
|
||||
|
||||
let
|
||||
done = await one(idxPendingBlocks)
|
||||
idx = pendingBlocks.find(done)
|
||||
without (dataPieces, _) =?
|
||||
(await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err:
|
||||
trace "Unable to prepare data", error = err.msg
|
||||
return failure(err)
|
||||
|
||||
idxPendingBlocks.del(idxPendingBlocks.find(done))
|
||||
|
||||
without blk =? (await done), error:
|
||||
trace "Failed retrieving block", error = error.msg
|
||||
continue
|
||||
|
||||
if idx >= encoded.K:
|
||||
trace "Retrieved parity block", cid = blk.cid, idx
|
||||
shallowCopy(parityData[idx - encoded.K], if blk.isEmpty: emptyBlock else: blk.data)
|
||||
else:
|
||||
trace "Retrieved data block", cid = blk.cid, idx
|
||||
shallowCopy(data[idx], if blk.isEmpty: emptyBlock else: blk.data)
|
||||
|
||||
resolved.inc
|
||||
|
||||
let
|
||||
dataPieces = data.filterIt( it.len > 0 ).len
|
||||
parityPieces = parityData.filterIt( it.len > 0 ).len
|
||||
|
||||
if dataPieces >= encoded.K:
|
||||
trace "Retrieved all the required data blocks", data = dataPieces, parity = parityPieces
|
||||
if dataPieces >= encoded.ecK:
|
||||
trace "Retrieved all the required data blocks"
|
||||
continue
|
||||
|
||||
trace "Erasure decoding data", data = dataPieces, parity = parityPieces
|
||||
if (
|
||||
let err = decoder.decode(data, parityData, recovered);
|
||||
err.isErr):
|
||||
trace "Unable to decode manifest!", err = $err.error
|
||||
return failure($err.error)
|
||||
trace "Erasure decoding data"
|
||||
|
||||
for i in 0..<encoded.K:
|
||||
if data[i].len <= 0:
|
||||
without recovered =? await asyncDecode(self.taskpool, decoder, data, parity, encoded.blockSize.int), err:
|
||||
trace "Error decoding data", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
for i in 0..<encoded.ecK:
|
||||
let idx = i * encoded.steps + step
|
||||
if data[i].len <= 0 and not cids[idx].isEmpty:
|
||||
without blk =? bt.Block.new(recovered[i]), error:
|
||||
trace "Unable to create block!", exc = error.msg
|
||||
return failure(error)
|
||||
|
||||
trace "Recovered block", cid = blk.cid
|
||||
trace "Recovered block", cid = blk.cid, index = i
|
||||
if isErr (await self.store.putBlock(blk)):
|
||||
trace "Unable to store block!", cid = blk.cid
|
||||
return failure("Unable to store block!")
|
||||
|
||||
cids[idx] = blk.cid
|
||||
recoveredIndices.add(idx)
|
||||
except CancelledError as exc:
|
||||
trace "Erasure coding decoding cancelled"
|
||||
raise exc # cancellation needs to be propagated
|
||||
|
@ -254,8 +441,22 @@ proc decode*(
|
|||
finally:
|
||||
decoder.release()
|
||||
|
||||
without decoded =? Manifest.new(blocks = encoded.blocks[0..<encoded.originalLen]), error:
|
||||
return error.failure
|
||||
without tree =? CodexTree.init(cids[0..<encoded.originalBlocksCount]), err:
|
||||
return failure(err)
|
||||
|
||||
without treeCid =? tree.rootCid, err:
|
||||
return failure(err)
|
||||
|
||||
if treeCid != encoded.originalTreeCid:
|
||||
return failure("Original tree root differs from the tree root computed out of recovered data")
|
||||
|
||||
let idxIter = Iter[Natural].new(recoveredIndices)
|
||||
.filter((i: Natural) => i < tree.leavesCount)
|
||||
|
||||
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
|
||||
return failure(err)
|
||||
|
||||
let decoded = Manifest.new(encoded)
|
||||
|
||||
return decoded.success
|
||||
|
||||
|
@ -269,9 +470,13 @@ proc new*(
|
|||
T: type Erasure,
|
||||
store: BlockStore,
|
||||
encoderProvider: EncoderProvider,
|
||||
decoderProvider: DecoderProvider): Erasure =
|
||||
decoderProvider: DecoderProvider,
|
||||
taskpool: Taskpool): Erasure =
|
||||
## Create a new Erasure instance for encoding and decoding manifests
|
||||
##
|
||||
|
||||
Erasure(
|
||||
store: store,
|
||||
encoderProvider: encoderProvider,
|
||||
decoderProvider: decoderProvider)
|
||||
decoderProvider: decoderProvider,
|
||||
taskpool: taskpool)
|
||||
|
|
|
@ -7,16 +7,43 @@
|
|||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import std/options
|
||||
|
||||
import pkg/stew/results
|
||||
import pkg/chronos
|
||||
import pkg/questionable/results
|
||||
|
||||
export results
|
||||
|
||||
type
|
||||
CodexError* = object of CatchableError # base codex error
|
||||
CodexResult*[T] = Result[T, ref CodexError]
|
||||
|
||||
template mapFailure*(
|
||||
exp: untyped,
|
||||
exc: typed = type CodexError): untyped =
|
||||
template mapFailure*[T, V, E](
|
||||
exp: Result[T, V],
|
||||
exc: typedesc[E],
|
||||
): Result[T, ref CatchableError] =
|
||||
## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
|
||||
##
|
||||
|
||||
((exp.mapErr do (e: auto) -> ref CatchableError: (ref exc)(msg: $e)))
|
||||
exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
|
||||
|
||||
template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
|
||||
mapFailure(exp, CodexError)
|
||||
|
||||
# TODO: using a template here, causes bad codegen
|
||||
func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
|
||||
if exp.isSome:
|
||||
success exp.get
|
||||
else:
|
||||
T.failure("Option is None")
|
||||
|
||||
proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
|
||||
try:
|
||||
await allFuturesThrowing(fut)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
return failure(exc.msg)
|
||||
|
||||
return success()
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2022 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import std/strutils
|
||||
|
||||
import pkg/chronicles
|
||||
import pkg/libp2p
|
||||
|
||||
func shortLog*(cid: Cid): string =
|
||||
## Returns compact string representation of ``pid``.
|
||||
var scid = $cid
|
||||
if len(scid) > 10:
|
||||
scid[3] = '*'
|
||||
|
||||
when (NimMajor, NimMinor) > (1, 4):
|
||||
scid.delete(4 .. scid.high - 6)
|
||||
else:
|
||||
scid.delete(4, scid.high - 6)
|
||||
|
||||
scid
|
||||
|
||||
chronicles.formatIt(Cid): shortLog(it)
|
|
@ -0,0 +1,97 @@
|
|||
import ./errors
|
||||
import ./utils
|
||||
import ./utils/asynciter
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
type
|
||||
StrategyType* = enum
|
||||
# Simplest approach:
|
||||
# 0 => 0, 1, 2
|
||||
# 1 => 3, 4, 5
|
||||
# 2 => 6, 7, 8
|
||||
LinearStrategy,
|
||||
|
||||
# Stepped indexing:
|
||||
# 0 => 0, 3, 6
|
||||
# 1 => 1, 4, 7
|
||||
# 2 => 2, 5, 8
|
||||
SteppedStrategy
|
||||
|
||||
# Representing a strategy for grouping indices (of blocks usually)
|
||||
# Given an interation-count as input, will produce a seq of
|
||||
# selected indices.
|
||||
|
||||
IndexingError* = object of CodexError
|
||||
IndexingWrongIndexError* = object of IndexingError
|
||||
IndexingWrongIterationsError* = object of IndexingError
|
||||
|
||||
IndexingStrategy* = object
|
||||
strategyType*: StrategyType
|
||||
firstIndex*: int # Lowest index that can be returned
|
||||
lastIndex*: int # Highest index that can be returned
|
||||
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
|
||||
step*: int
|
||||
|
||||
func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
|
||||
if iteration >= self.iterations:
|
||||
raise newException(
|
||||
IndexingError,
|
||||
"Indexing iteration can't be greater than or equal to iterations.")
|
||||
|
||||
func getIter(first, last, step: int): Iter[int] =
|
||||
{.cast(noSideEffect).}:
|
||||
Iter[int].new(first, last, step)
|
||||
|
||||
func getLinearIndicies(
|
||||
self: IndexingStrategy,
|
||||
iteration: int): Iter[int] {.raises: [IndexingError].} =
|
||||
self.checkIteration(iteration)
|
||||
|
||||
let
|
||||
first = self.firstIndex + iteration * self.step
|
||||
last = min(first + self.step - 1, self.lastIndex)
|
||||
|
||||
getIter(first, last, 1)
|
||||
|
||||
func getSteppedIndicies(
|
||||
self: IndexingStrategy,
|
||||
iteration: int): Iter[int] {.raises: [IndexingError].} =
|
||||
self.checkIteration(iteration)
|
||||
|
||||
let
|
||||
first = self.firstIndex + iteration
|
||||
last = self.lastIndex
|
||||
|
||||
getIter(first, last, self.iterations)
|
||||
|
||||
func getIndicies*(
|
||||
self: IndexingStrategy,
|
||||
iteration: int): Iter[int] {.raises: [IndexingError].} =
|
||||
|
||||
case self.strategyType
|
||||
of StrategyType.LinearStrategy:
|
||||
self.getLinearIndicies(iteration)
|
||||
of StrategyType.SteppedStrategy:
|
||||
self.getSteppedIndicies(iteration)
|
||||
|
||||
func init*(
|
||||
strategy: StrategyType,
|
||||
firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =
|
||||
|
||||
if firstIndex > lastIndex:
|
||||
raise newException(
|
||||
IndexingWrongIndexError,
|
||||
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")
|
||||
|
||||
if iterations <= 0:
|
||||
raise newException(
|
||||
IndexingWrongIterationsError,
|
||||
"iterations (" & $iterations & ") must be greater than zero.")
|
||||
|
||||
IndexingStrategy(
|
||||
strategyType: strategy,
|
||||
firstIndex: firstIndex,
|
||||
lastIndex: lastIndex,
|
||||
iterations: iterations,
|
||||
step: divUp((lastIndex - firstIndex + 1), iterations))
|
|
@ -0,0 +1,242 @@
|
|||
## logutils is a module that has several goals:
|
||||
## 1. Fix json logging output (run with `--log-format=json`) which was
|
||||
## effectively broken for many types using default Chronicles json
|
||||
## serialization.
|
||||
## 2. Ability to specify log output for textlines and json sinks together or
|
||||
## separately
|
||||
## - This is useful if consuming json in some kind of log parser and need
|
||||
## valid json with real values
|
||||
## - eg a shortened Cid is nice to see in a text log in stdout, but won't
|
||||
## provide a real Cid when parsed in json
|
||||
## 4. Remove usages of `nim-json-serialization` from the codebase
|
||||
## 5. Remove need to declare `writeValue` for new types
|
||||
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
|
||||
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
|
||||
##
|
||||
## When declaring a new type, one should consider importing the `codex/logutils`
|
||||
## module, and specifying `formatIt`. If textlines log output and json log output
|
||||
## need to be different, overload `formatIt` and specify a `LogFormat`. If json
|
||||
## serialization is needed, it can be declared with a `%` proc. `logutils`
|
||||
## imports and exports `nim-serde` which handles the de/serialization, examples
|
||||
## below. **Only `codex/logutils` needs to be imported.**
|
||||
##
|
||||
## Using `logutils` in the Codex codebase:
|
||||
## - Instead of importing `pkg/chronicles`, import `pkg/codex/logutils`
|
||||
## - most of `chronicles` is exported by `logutils`
|
||||
## - Instead of importing `std/json`, import `pkg/serde/json`
|
||||
## - `std/json` is exported by `serde` which is exported by `logutils`
|
||||
## - Instead of importing `pkg/nim-json-serialization`, import
|
||||
## `pkg/serde/json` or use codex-specific overloads by importing `utils/json`
|
||||
## - one of the goals is to remove the use of `nim-json-serialization`
|
||||
##
|
||||
## ```nim
|
||||
## import pkg/codex/logutils
|
||||
##
|
||||
## type
|
||||
## BlockAddress* = object
|
||||
## case leaf*: bool
|
||||
## of true:
|
||||
## treeCid* {.serialize.}: Cid
|
||||
## index* {.serialize.}: Natural
|
||||
## else:
|
||||
## cid* {.serialize.}: Cid
|
||||
##
|
||||
## logutils.formatIt(LogFormat.textLines, BlockAddress):
|
||||
## if it.leaf:
|
||||
## "treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
|
||||
## else:
|
||||
## "cid: " & shortLog($it.cid)
|
||||
##
|
||||
## logutils.formatIt(LogFormat.json, BlockAddress): %it
|
||||
##
|
||||
## # chronicles textlines output
|
||||
## TRC test tid=14397405 ba="treeCid: zb2*fndjU1, index: 0"
|
||||
## # chronicles json output
|
||||
## {"lvl":"TRC","msg":"test","tid":14397405,"ba":{"treeCid":"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1","index":0}}
|
||||
## ```
|
||||
## In this case, `BlockAddress` is just an object, so `nim-serde` can handle
|
||||
## serializing it without issue (only fields annotated with `{.serialize.}` will
|
||||
## serialize (aka opt-in serialization)).
|
||||
##
|
||||
## If one so wished, another option for the textlines log output, would be to
|
||||
## simply `toString` the serialised json:
|
||||
## ```nim
|
||||
## logutils.formatIt(LogFormat.textLines, BlockAddress): $ %it
|
||||
## # or, more succinctly:
|
||||
## logutils.formatIt(LogFormat.textLines, BlockAddress): it.toJson
|
||||
## ```
|
||||
## In that case, both the textlines and json sinks would have the same output,
|
||||
## so we could reduce this even further by not specifying a `LogFormat`:
|
||||
## ```nim
|
||||
## type
|
||||
## BlockAddress* = object
|
||||
## case leaf*: bool
|
||||
## of true:
|
||||
## treeCid* {.serialize.}: Cid
|
||||
## index* {.serialize.}: Natural
|
||||
## else:
|
||||
## cid* {.serialize.}: Cid
|
||||
##
|
||||
## logutils.formatIt(BlockAddress): %it
|
||||
##
|
||||
## # chronicles textlines output
|
||||
## TRC test tid=14400673 ba="{\"treeCid\":\"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1\",\"index\":0}"
|
||||
## # chronicles json output
|
||||
## {"lvl":"TRC","msg":"test","tid":14400673,"ba":{"treeCid":"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1","index":0}}
|
||||
## ```
|
||||
|
||||
import std/options
|
||||
import std/sequtils
|
||||
import std/strutils
|
||||
import std/sugar
|
||||
import std/typetraits
|
||||
|
||||
import pkg/chronicles except toJson, `%`
|
||||
from pkg/libp2p import Cid, MultiAddress, `$`
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import ./utils/json except formatIt # TODO: remove exception?
|
||||
import pkg/stew/byteutils
|
||||
import pkg/stint
|
||||
import pkg/upraises
|
||||
|
||||
export byteutils
|
||||
export chronicles except toJson, formatIt, `%`
|
||||
export questionable
|
||||
export sequtils
|
||||
export json except formatIt
|
||||
export strutils
|
||||
export sugar
|
||||
export upraises
|
||||
export results
|
||||
|
||||
func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
|
||||
## Returns compact string representation of ``long``.
|
||||
var short = long
|
||||
let minLen = start + ellipses.len + stop
|
||||
if len(short) > minLen:
|
||||
short.insert(ellipses, start)
|
||||
|
||||
when (NimMajor, NimMinor) > (1, 4):
|
||||
short.delete(start + ellipses.len .. short.high - stop)
|
||||
else:
|
||||
short.delete(start + ellipses.len, short.high - stop)
|
||||
|
||||
short
|
||||
|
||||
func shortHexLog*(long: string): string =
|
||||
if long[0..1] == "0x": result &= "0x"
|
||||
result &= long[2..long.high].shortLog("..", 4, 4)
|
||||
|
||||
func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =
|
||||
v.to0xHex.shortHexLog
|
||||
|
||||
func short0xHexLog*[T: distinct](v: T): string =
|
||||
type BaseType = T.distinctBase
|
||||
BaseType(v).short0xHexLog
|
||||
|
||||
func short0xHexLog*[U: distinct, T: seq[U]](v: T): string =
|
||||
type BaseType = U.distinctBase
|
||||
"@[" & v.map(x => BaseType(x).short0xHexLog).join(",") & "]"
|
||||
|
||||
func to0xHexLog*[T: distinct](v: T): string =
|
||||
type BaseType = T.distinctBase
|
||||
BaseType(v).to0xHex
|
||||
|
||||
func to0xHexLog*[U: distinct, T: seq[U]](v: T): string =
|
||||
type BaseType = U.distinctBase
|
||||
"@[" & v.map(x => BaseType(x).to0xHex).join(",") & "]"
|
||||
|
||||
proc formatTextLineSeq*(val: seq[string]): string =
|
||||
"@[" & val.join(", ") & "]"
|
||||
|
||||
template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
|
||||
# Provides formatters for logging with Chronicles for the given type and
|
||||
# `LogFormat`.
|
||||
# NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden
|
||||
# since the base `setProperty` is generic using `auto` and conflicts with
|
||||
# providing a generic `seq` and `Option` override.
|
||||
when format == LogFormat.json:
|
||||
proc formatJsonOption(val: ?T): JsonNode =
|
||||
if it =? val:
|
||||
json.`%`(body)
|
||||
else:
|
||||
newJNull()
|
||||
|
||||
proc formatJsonResult*(val: ?!T): JsonNode =
|
||||
without it =? val, error:
|
||||
let jObj = newJObject()
|
||||
jObj["error"] = newJString(error.msg)
|
||||
return jObj
|
||||
json.`%`(body)
|
||||
|
||||
proc setProperty*(r: var JsonRecord, key: string, res: ?!T) =
|
||||
var it {.inject, used.}: T
|
||||
setProperty(r, key, res.formatJsonResult)
|
||||
|
||||
proc setProperty*(r: var JsonRecord, key: string, opt: ?T) =
|
||||
var it {.inject, used.}: T
|
||||
let v = opt.formatJsonOption
|
||||
setProperty(r, key, v)
|
||||
|
||||
proc setProperty*(r: var JsonRecord, key: string, opts: seq[?T]) =
|
||||
var it {.inject, used.}: T
|
||||
let v = opts.map(opt => opt.formatJsonOption)
|
||||
setProperty(r, key, json.`%`(v))
|
||||
|
||||
proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) =
|
||||
var it {.inject, used.}: T
|
||||
let v = val.map(it => body)
|
||||
setProperty(r, key, json.`%`(v))
|
||||
|
||||
proc setProperty*(r: var JsonRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
|
||||
var it {.inject, used.}: T = val
|
||||
let v = body
|
||||
setProperty(r, key, json.`%`(v))
|
||||
|
||||
elif format == LogFormat.textLines:
|
||||
proc formatTextLineOption*(val: ?T): string =
|
||||
var v = "none(" & $T & ")"
|
||||
if it =? val:
|
||||
v = "some(" & $(body) & ")" # that I used to know :)
|
||||
v
|
||||
|
||||
proc formatTextLineResult*(val: ?!T): string =
|
||||
without it =? val, error:
|
||||
return "Error: " & error.msg
|
||||
$(body)
|
||||
|
||||
proc setProperty*(r: var TextLineRecord, key: string, res: ?!T) =
|
||||
var it {.inject, used.}: T
|
||||
setProperty(r, key, res.formatTextLineResult)
|
||||
|
||||
proc setProperty*(r: var TextLineRecord, key: string, opt: ?T) =
|
||||
var it {.inject, used.}: T
|
||||
let v = opt.formatTextLineOption
|
||||
setProperty(r, key, v)
|
||||
|
||||
proc setProperty*(r: var TextLineRecord, key: string, opts: seq[?T]) =
|
||||
var it {.inject, used.}: T
|
||||
let v = opts.map(opt => opt.formatTextLineOption)
|
||||
setProperty(r, key, v.formatTextLineSeq)
|
||||
|
||||
proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) =
|
||||
var it {.inject, used.}: T
|
||||
let v = val.map(it => body)
|
||||
setProperty(r, key, v.formatTextLineSeq)
|
||||
|
||||
proc setProperty*(r: var TextLineRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
|
||||
var it {.inject, used.}: T = val
|
||||
let v = body
|
||||
setProperty(r, key, v)
|
||||
|
||||
template formatIt*(T: type, body: untyped) {.dirty.} =
|
||||
formatIt(LogFormat.textLines, T): body
|
||||
formatIt(LogFormat.json, T): body
|
||||
|
||||
formatIt(LogFormat.textLines, Cid): shortLog($it)
|
||||
formatIt(LogFormat.json, Cid): $it
|
||||
formatIt(UInt256): $it
|
||||
formatIt(MultiAddress): $it
|
||||
formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
|
||||
formatIt(LogFormat.json, array[32, byte]): it.to0xHex
|
|
@ -1,5 +1,4 @@
|
|||
import ./manifest/coders
|
||||
import ./manifest/manifest
|
||||
import ./manifest/types
|
||||
|
||||
export types, manifest, coders
|
||||
export manifest, coders
|
||||
|
|
|
@ -14,19 +14,20 @@ import pkg/upraises
|
|||
push: {.upraises: [].}
|
||||
|
||||
import std/tables
|
||||
import std/sequtils
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
|
||||
import ./manifest
|
||||
import ../errors
|
||||
import ../blocktype
|
||||
import ./types
|
||||
import ../logutils
|
||||
import ../indexingstrategy
|
||||
|
||||
func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
|
||||
proc encode*(manifest: Manifest): ?!seq[byte] =
|
||||
## Encode the manifest into a ``ManifestCodec``
|
||||
## multicodec container (Dag-pb) for now
|
||||
##
|
||||
|
@ -34,54 +35,67 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
|
|||
? manifest.verify()
|
||||
var pbNode = initProtoBuffer()
|
||||
|
||||
for c in manifest.blocks:
|
||||
var pbLink = initProtoBuffer()
|
||||
pbLink.write(1, c.data.buffer) # write Cid links
|
||||
pbLink.finish()
|
||||
pbNode.write(2, pbLink)
|
||||
|
||||
# NOTE: The `Data` field in the the `dag-pb`
|
||||
# contains the following protobuf `Message`
|
||||
#
|
||||
# ```protobuf
|
||||
# Message ErasureInfo {
|
||||
# optional uint32 K = 1; # number of encoded blocks
|
||||
# optional uint32 M = 2; # number of parity blocks
|
||||
# optional bytes cid = 3; # cid of the original dataset
|
||||
# optional uint32 original = 4; # number of original blocks
|
||||
# Message VerificationInfo {
|
||||
# bytes verifyRoot = 1; # Decimal encoded field-element
|
||||
# repeated bytes slotRoots = 2; # Decimal encoded field-elements
|
||||
# }
|
||||
# Message ErasureInfo {
|
||||
# optional uint32 ecK = 1; # number of encoded blocks
|
||||
# optional uint32 ecM = 2; # number of parity blocks
|
||||
# optional bytes originalTreeCid = 3; # cid of the original dataset
|
||||
# optional uint32 originalDatasetSize = 4; # size of the original dataset
|
||||
# optional VerificationInformation verification = 5; # verification information
|
||||
# }
|
||||
#
|
||||
# Message Header {
|
||||
# optional bytes rootHash = 1; # the root (tree) hash
|
||||
# optional bytes treeCid = 1; # cid (root) of the tree
|
||||
# optional uint32 blockSize = 2; # size of a single block
|
||||
# optional uint32 blocksLen = 3; # total amount of blocks
|
||||
# optional ErasureInfo erasure = 4; # erasure coding info
|
||||
# optional uint64 originalBytes = 5;# exact file size
|
||||
# optional uint64 datasetSize = 3; # size of the dataset
|
||||
# optional codec: MultiCodec = 4; # Dataset codec
|
||||
# optional hcodec: MultiCodec = 5 # Multihash codec
|
||||
# optional version: CidVersion = 6; # Cid version
|
||||
# optional ErasureInfo erasure = 7; # erasure coding info
|
||||
# }
|
||||
# ```
|
||||
#
|
||||
|
||||
let cid = !manifest.rootHash
|
||||
# var treeRootVBuf = initVBuffer()
|
||||
var header = initProtoBuffer()
|
||||
header.write(1, cid.data.buffer)
|
||||
header.write(1, manifest.treeCid.data.buffer)
|
||||
header.write(2, manifest.blockSize.uint32)
|
||||
header.write(3, manifest.len.uint32)
|
||||
header.write(5, manifest.originalBytes.uint64)
|
||||
header.write(3, manifest.datasetSize.uint64)
|
||||
header.write(4, manifest.codec.uint32)
|
||||
header.write(5, manifest.hcodec.uint32)
|
||||
header.write(6, manifest.version.uint32)
|
||||
if manifest.protected:
|
||||
var erasureInfo = initProtoBuffer()
|
||||
erasureInfo.write(1, manifest.K.uint32)
|
||||
erasureInfo.write(2, manifest.M.uint32)
|
||||
erasureInfo.write(3, manifest.originalCid.data.buffer)
|
||||
erasureInfo.write(4, manifest.originalLen.uint32)
|
||||
erasureInfo.write(1, manifest.ecK.uint32)
|
||||
erasureInfo.write(2, manifest.ecM.uint32)
|
||||
erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
|
||||
erasureInfo.write(4, manifest.originalDatasetSize.uint64)
|
||||
erasureInfo.write(5, manifest.protectedStrategy.uint32)
|
||||
|
||||
if manifest.verifiable:
|
||||
var verificationInfo = initProtoBuffer()
|
||||
verificationInfo.write(1, manifest.verifyRoot.data.buffer)
|
||||
for slotRoot in manifest.slotRoots:
|
||||
verificationInfo.write(2, slotRoot.data.buffer)
|
||||
verificationInfo.write(3, manifest.cellSize.uint32)
|
||||
verificationInfo.write(4, manifest.verifiableStrategy.uint32)
|
||||
erasureInfo.write(6, verificationInfo)
|
||||
|
||||
erasureInfo.finish()
|
||||
header.write(7, erasureInfo)
|
||||
|
||||
header.write(4, erasureInfo)
|
||||
|
||||
pbNode.write(1, header) # set the rootHash Cid as the data field
|
||||
pbNode.write(1, header) # set the treeCid as the data field
|
||||
pbNode.finish()
|
||||
|
||||
return pbNode.buffer.success
|
||||
|
||||
func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
|
||||
proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
|
||||
## Decode a manifest from a data blob
|
||||
##
|
||||
|
||||
|
@ -89,105 +103,131 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
|
|||
pbNode = initProtoBuffer(data)
|
||||
pbHeader: ProtoBuffer
|
||||
pbErasureInfo: ProtoBuffer
|
||||
rootHash: seq[byte]
|
||||
originalCid: seq[byte]
|
||||
originalBytes: uint64
|
||||
pbVerificationInfo: ProtoBuffer
|
||||
treeCidBuf: seq[byte]
|
||||
originalTreeCid: seq[byte]
|
||||
datasetSize: uint64
|
||||
codec: uint32
|
||||
hcodec: uint32
|
||||
version: uint32
|
||||
blockSize: uint32
|
||||
blocksLen: uint32
|
||||
originalLen: uint32
|
||||
K, M: uint32
|
||||
blocks: seq[Cid]
|
||||
originalDatasetSize: uint64
|
||||
ecK, ecM: uint32
|
||||
protectedStrategy: uint32
|
||||
verifyRoot: seq[byte]
|
||||
slotRoots: seq[seq[byte]]
|
||||
cellSize: uint32
|
||||
verifiableStrategy: uint32
|
||||
|
||||
# Decode `Header` message
|
||||
if pbNode.getField(1, pbHeader).isErr:
|
||||
return failure("Unable to decode `Header` from dag-pb manifest!")
|
||||
|
||||
# Decode `Header` contents
|
||||
if pbHeader.getField(1, rootHash).isErr:
|
||||
return failure("Unable to decode `rootHash` from manifest!")
|
||||
if pbHeader.getField(1, treeCidBuf).isErr:
|
||||
return failure("Unable to decode `treeCid` from manifest!")
|
||||
|
||||
if pbHeader.getField(2, blockSize).isErr:
|
||||
return failure("Unable to decode `blockSize` from manifest!")
|
||||
|
||||
if pbHeader.getField(3, blocksLen).isErr:
|
||||
return failure("Unable to decode `blocksLen` from manifest!")
|
||||
if pbHeader.getField(3, datasetSize).isErr:
|
||||
return failure("Unable to decode `datasetSize` from manifest!")
|
||||
|
||||
if pbHeader.getField(5, originalBytes).isErr:
|
||||
return failure("Unable to decode `originalBytes` from manifest!")
|
||||
if pbHeader.getField(4, codec).isErr:
|
||||
return failure("Unable to decode `codec` from manifest!")
|
||||
|
||||
if pbHeader.getField(4, pbErasureInfo).isErr:
|
||||
if pbHeader.getField(5, hcodec).isErr:
|
||||
return failure("Unable to decode `hcodec` from manifest!")
|
||||
|
||||
if pbHeader.getField(6, version).isErr:
|
||||
return failure("Unable to decode `version` from manifest!")
|
||||
|
||||
if pbHeader.getField(7, pbErasureInfo).isErr:
|
||||
return failure("Unable to decode `erasureInfo` from manifest!")
|
||||
|
||||
if pbErasureInfo.buffer.len > 0:
|
||||
if pbErasureInfo.getField(1, K).isErr:
|
||||
let protected = pbErasureInfo.buffer.len > 0
|
||||
var verifiable = false
|
||||
if protected:
|
||||
if pbErasureInfo.getField(1, ecK).isErr:
|
||||
return failure("Unable to decode `K` from manifest!")
|
||||
|
||||
if pbErasureInfo.getField(2, M).isErr:
|
||||
if pbErasureInfo.getField(2, ecM).isErr:
|
||||
return failure("Unable to decode `M` from manifest!")
|
||||
|
||||
if pbErasureInfo.getField(3, originalCid).isErr:
|
||||
return failure("Unable to decode `originalCid` from manifest!")
|
||||
if pbErasureInfo.getField(3, originalTreeCid).isErr:
|
||||
return failure("Unable to decode `originalTreeCid` from manifest!")
|
||||
|
||||
if pbErasureInfo.getField(4, originalLen).isErr:
|
||||
return failure("Unable to decode `originalLen` from manifest!")
|
||||
if pbErasureInfo.getField(4, originalDatasetSize).isErr:
|
||||
return failure("Unable to decode `originalDatasetSize` from manifest!")
|
||||
|
||||
let rootHashCid = ? Cid.init(rootHash).mapFailure
|
||||
var linksBuf: seq[seq[byte]]
|
||||
if pbNode.getRepeatedField(2, linksBuf).isOk:
|
||||
for pbLinkBuf in linksBuf:
|
||||
var
|
||||
blocksBuf: seq[seq[byte]]
|
||||
blockBuf: seq[byte]
|
||||
pbLink = initProtoBuffer(pbLinkBuf)
|
||||
if pbErasureInfo.getField(5, protectedStrategy).isErr:
|
||||
return failure("Unable to decode `protectedStrategy` from manifest!")
|
||||
|
||||
if pbLink.getField(1, blockBuf).isOk:
|
||||
blocks.add(? Cid.init(blockBuf).mapFailure)
|
||||
if pbErasureInfo.getField(6, pbVerificationInfo).isErr:
|
||||
return failure("Unable to decode `verificationInfo` from manifest!")
|
||||
|
||||
if blocksLen.int != blocks.len:
|
||||
return failure("Total blocks and length of blocks in header don't match!")
|
||||
verifiable = pbVerificationInfo.buffer.len > 0
|
||||
if verifiable:
|
||||
if pbVerificationInfo.getField(1, verifyRoot).isErr:
|
||||
return failure("Unable to decode `verifyRoot` from manifest!")
|
||||
|
||||
var
|
||||
self = Manifest(
|
||||
rootHash: rootHashCid.some,
|
||||
originalBytes: originalBytes.int,
|
||||
blockSize: blockSize.int,
|
||||
blocks: blocks,
|
||||
hcodec: (? rootHashCid.mhash.mapFailure).mcodec,
|
||||
codec: rootHashCid.mcodec,
|
||||
version: rootHashCid.cidver,
|
||||
protected: pbErasureInfo.buffer.len > 0)
|
||||
if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr:
|
||||
return failure("Unable to decode `slotRoots` from manifest!")
|
||||
|
||||
if self.protected:
|
||||
self.K = K.int
|
||||
self.M = M.int
|
||||
self.originalCid = ? Cid.init(originalCid).mapFailure
|
||||
self.originalLen = originalLen.int
|
||||
if pbVerificationInfo.getField(3, cellSize).isErr:
|
||||
return failure("Unable to decode `cellSize` from manifest!")
|
||||
|
||||
if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
|
||||
return failure("Unable to decode `verifiableStrategy` from manifest!")
|
||||
|
||||
let
|
||||
treeCid = ? Cid.init(treeCidBuf).mapFailure
|
||||
|
||||
let
|
||||
self = if protected:
|
||||
Manifest.new(
|
||||
treeCid = treeCid,
|
||||
datasetSize = datasetSize.NBytes,
|
||||
blockSize = blockSize.NBytes,
|
||||
version = CidVersion(version),
|
||||
hcodec = hcodec.MultiCodec,
|
||||
codec = codec.MultiCodec,
|
||||
ecK = ecK.int,
|
||||
ecM = ecM.int,
|
||||
originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
|
||||
originalDatasetSize = originalDatasetSize.NBytes,
|
||||
strategy = StrategyType(protectedStrategy))
|
||||
else:
|
||||
Manifest.new(
|
||||
treeCid = treeCid,
|
||||
datasetSize = datasetSize.NBytes,
|
||||
blockSize = blockSize.NBytes,
|
||||
version = CidVersion(version),
|
||||
hcodec = hcodec.MultiCodec,
|
||||
codec = codec.MultiCodec)
|
||||
|
||||
? self.verify()
|
||||
|
||||
if verifiable:
|
||||
let
|
||||
verifyRootCid = ? Cid.init(verifyRoot).mapFailure
|
||||
slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure)
|
||||
|
||||
return Manifest.new(
|
||||
manifest = self,
|
||||
verifyRoot = verifyRootCid,
|
||||
slotRoots = slotRootCids,
|
||||
cellSize = cellSize.NBytes,
|
||||
strategy = StrategyType(verifiableStrategy)
|
||||
)
|
||||
|
||||
self.success
|
||||
|
||||
proc encode*(
|
||||
self: Manifest,
|
||||
encoder = ManifestContainers[$DagPBCodec]): ?!seq[byte] =
|
||||
## Encode a manifest using `encoder`
|
||||
##
|
||||
|
||||
if self.rootHash.isNone:
|
||||
? self.makeRoot()
|
||||
|
||||
encoder.encode(self)
|
||||
|
||||
func decode*(
|
||||
_: type Manifest,
|
||||
data: openArray[byte],
|
||||
decoder = ManifestContainers[$DagPBCodec]): ?!Manifest =
|
||||
func decode*(_: type Manifest, blk: Block): ?!Manifest =
|
||||
## Decode a manifest using `decoder`
|
||||
##
|
||||
|
||||
decoder.decode(data)
|
||||
if not ? blk.cid.isManifest:
|
||||
return failure "Cid not a manifest codec"
|
||||
|
||||
func decode*(_: type Manifest, blk: Block): ?!Manifest =
|
||||
without contentType =? blk.cid.contentType() and
|
||||
containerType =? ManifestContainers.?[$contentType]:
|
||||
return failure "CID has invalid content type for manifest"
|
||||
Manifest.decode(blk.data, containerType)
|
||||
Manifest.decode(blk.data)
|
||||
|
|
|
@ -14,209 +14,327 @@ import pkg/upraises
|
|||
push: {.upraises: [].}
|
||||
|
||||
import pkg/libp2p/protobuf/minprotobuf
|
||||
import pkg/libp2p
|
||||
import pkg/questionable
|
||||
import pkg/libp2p/[cid, multihash, multicodec]
|
||||
import pkg/questionable/results
|
||||
import pkg/chronicles
|
||||
|
||||
import ../errors
|
||||
import ../utils
|
||||
import ../utils/json
|
||||
import ../units
|
||||
import ../blocktype
|
||||
import ./types
|
||||
import ./coders
|
||||
import ../indexingstrategy
|
||||
import ../logutils
|
||||
|
||||
|
||||
# TODO: Manifest should be reworked to more concrete types,
|
||||
# perhaps using inheritance
|
||||
type
|
||||
Manifest* = ref object of RootObj
|
||||
treeCid {.serialize.}: Cid # Root of the merkle tree
|
||||
datasetSize {.serialize.}: NBytes # Total size of all blocks
|
||||
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
|
||||
codec: MultiCodec # Dataset codec
|
||||
hcodec: MultiCodec # Multihash codec
|
||||
version: CidVersion # Cid version
|
||||
case protected {.serialize.}: bool # Protected datasets have erasure coded info
|
||||
of true:
|
||||
ecK: int # Number of blocks to encode
|
||||
ecM: int # Number of resulting parity blocks
|
||||
originalTreeCid: Cid # The original root of the dataset being erasure coded
|
||||
originalDatasetSize: NBytes
|
||||
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
|
||||
case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
|
||||
of true:
|
||||
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
|
||||
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
|
||||
cellSize: NBytes # Size of each slot cell
|
||||
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
|
||||
else:
|
||||
discard
|
||||
else:
|
||||
discard
|
||||
|
||||
############################################################
|
||||
# Accessors
|
||||
############################################################
|
||||
|
||||
func blockSize*(self: Manifest): NBytes =
|
||||
self.blockSize
|
||||
|
||||
func datasetSize*(self: Manifest): NBytes =
|
||||
self.datasetSize
|
||||
|
||||
func version*(self: Manifest): CidVersion =
|
||||
self.version
|
||||
|
||||
func hcodec*(self: Manifest): MultiCodec =
|
||||
self.hcodec
|
||||
|
||||
func codec*(self: Manifest): MultiCodec =
|
||||
self.codec
|
||||
|
||||
func protected*(self: Manifest): bool =
|
||||
self.protected
|
||||
|
||||
func ecK*(self: Manifest): int =
|
||||
self.ecK
|
||||
|
||||
func ecM*(self: Manifest): int =
|
||||
self.ecM
|
||||
|
||||
func originalTreeCid*(self: Manifest): Cid =
|
||||
self.originalTreeCid
|
||||
|
||||
func originalBlocksCount*(self: Manifest): int =
|
||||
divUp(self.originalDatasetSize.int, self.blockSize.int)
|
||||
|
||||
func originalDatasetSize*(self: Manifest): NBytes =
|
||||
self.originalDatasetSize
|
||||
|
||||
func treeCid*(self: Manifest): Cid =
|
||||
self.treeCid
|
||||
|
||||
func blocksCount*(self: Manifest): int =
|
||||
divUp(self.datasetSize.int, self.blockSize.int)
|
||||
|
||||
func verifiable*(self: Manifest): bool =
|
||||
bool (self.protected and self.verifiable)
|
||||
|
||||
func verifyRoot*(self: Manifest): Cid =
|
||||
self.verifyRoot
|
||||
|
||||
func slotRoots*(self: Manifest): seq[Cid] =
|
||||
self.slotRoots
|
||||
|
||||
func numSlots*(self: Manifest): int =
|
||||
self.ecK + self.ecM
|
||||
|
||||
func cellSize*(self: Manifest): NBytes =
|
||||
self.cellSize
|
||||
|
||||
func protectedStrategy*(self: Manifest): StrategyType =
|
||||
self.protectedStrategy
|
||||
|
||||
func verifiableStrategy*(self: Manifest): StrategyType =
|
||||
self.verifiableStrategy
|
||||
|
||||
func numSlotBlocks*(self: Manifest): int =
|
||||
divUp(self.blocksCount, self.numSlots)
|
||||
|
||||
############################################################
|
||||
# Operations on block list
|
||||
############################################################
|
||||
|
||||
func len*(self: Manifest): int =
|
||||
self.blocks.len
|
||||
|
||||
func `[]`*(self: Manifest, i: Natural): Cid =
|
||||
self.blocks[i]
|
||||
|
||||
func `[]=`*(self: var Manifest, i: Natural, item: Cid) =
|
||||
self.rootHash = Cid.none
|
||||
self.blocks[i] = item
|
||||
|
||||
func `[]`*(self: Manifest, i: BackwardsIndex): Cid =
|
||||
self.blocks[self.len - i.int]
|
||||
|
||||
func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) =
|
||||
self.rootHash = Cid.none
|
||||
self.blocks[self.len - i.int] = item
|
||||
|
||||
proc add*(self: Manifest, cid: Cid) =
|
||||
assert not self.protected # we expect that protected manifests are created with properly-sized self.blocks
|
||||
self.rootHash = Cid.none
|
||||
trace "Adding cid to manifest", cid
|
||||
self.blocks.add(cid)
|
||||
self.originalBytes = self.blocks.len * self.blockSize
|
||||
|
||||
iterator items*(self: Manifest): Cid =
|
||||
for b in self.blocks:
|
||||
yield b
|
||||
|
||||
iterator pairs*(self: Manifest): tuple[key: int, val: Cid] =
|
||||
for pair in self.blocks.pairs():
|
||||
yield pair
|
||||
|
||||
func contains*(self: Manifest, cid: Cid): bool =
|
||||
cid in self.blocks
|
||||
func isManifest*(cid: Cid): ?!bool =
|
||||
success (ManifestCodec == ? cid.contentType().mapFailure(CodexError))
|
||||
|
||||
func isManifest*(mc: MultiCodec): ?!bool =
|
||||
success mc == ManifestCodec
|
||||
|
||||
############################################################
|
||||
# Various sizes and verification
|
||||
############################################################
|
||||
|
||||
func bytes*(self: Manifest, pad = true): int =
|
||||
## Compute how many bytes corresponding StoreStream(Manifest, pad) will return
|
||||
if pad or self.protected:
|
||||
self.len * self.blockSize
|
||||
else:
|
||||
self.originalBytes
|
||||
|
||||
func rounded*(self: Manifest): int =
|
||||
## Number of data blocks in *protected* manifest including padding at the end
|
||||
roundUp(self.originalLen, self.K)
|
||||
roundUp(self.originalBlocksCount, self.ecK)
|
||||
|
||||
func steps*(self: Manifest): int =
|
||||
## Number of EC groups in *protected* manifest
|
||||
divUp(self.originalLen, self.K)
|
||||
divUp(self.rounded, self.ecK)
|
||||
|
||||
func verify*(self: Manifest): ?!void =
|
||||
## Check manifest correctness
|
||||
##
|
||||
let originalLen = (if self.protected: self.originalLen else: self.len)
|
||||
|
||||
if divUp(self.originalBytes, self.blockSize) != originalLen:
|
||||
return failure newException(CodexError, "Broken manifest: wrong originalBytes")
|
||||
|
||||
if self.protected and (self.len != self.steps * (self.K + self.M)):
|
||||
return failure newException(CodexError, "Broken manifest: wrong originalLen")
|
||||
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
|
||||
return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
|
||||
|
||||
return success()
|
||||
|
||||
func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
|
||||
self.treeCid.success
|
||||
|
||||
############################################################
|
||||
# Cid computation
|
||||
############################################################
|
||||
|
||||
template hashBytes(mh: MultiHash): seq[byte] =
|
||||
## get the hash bytes of a multihash object
|
||||
##
|
||||
|
||||
mh.data.buffer[mh.dpos..(mh.dpos + mh.size - 1)]
|
||||
|
||||
proc makeRoot*(self: Manifest): ?!void =
|
||||
## Create a tree hash root of the contained
|
||||
## block hashes
|
||||
##
|
||||
|
||||
var
|
||||
stack: seq[MultiHash]
|
||||
|
||||
for cid in self:
|
||||
stack.add(? cid.mhash.mapFailure)
|
||||
|
||||
while stack.len > 1:
|
||||
let
|
||||
(b1, b2) = (stack.pop(), stack.pop())
|
||||
mh = ? MultiHash.digest(
|
||||
$self.hcodec,
|
||||
(b1.hashBytes() & b2.hashBytes()))
|
||||
.mapFailure
|
||||
stack.add(mh)
|
||||
|
||||
if stack.len == 1:
|
||||
let cid = ? Cid.init(
|
||||
self.version,
|
||||
self.codec,
|
||||
(? EmptyDigests[self.version][self.hcodec].catch))
|
||||
.mapFailure
|
||||
|
||||
self.rootHash = cid.some
|
||||
|
||||
success()
|
||||
|
||||
proc cid*(self: Manifest): ?!Cid =
|
||||
## Generate a root hash using the treehash algorithm
|
||||
##
|
||||
|
||||
if self.rootHash.isNone:
|
||||
? self.makeRoot()
|
||||
|
||||
(!self.rootHash).success
|
||||
func `==`*(a, b: Manifest): bool =
|
||||
(a.treeCid == b.treeCid) and
|
||||
(a.datasetSize == b.datasetSize) and
|
||||
(a.blockSize == b.blockSize) and
|
||||
(a.version == b.version) and
|
||||
(a.hcodec == b.hcodec) and
|
||||
(a.codec == b.codec) and
|
||||
(a.protected == b.protected) and
|
||||
(if a.protected:
|
||||
(a.ecK == b.ecK) and
|
||||
(a.ecM == b.ecM) and
|
||||
(a.originalTreeCid == b.originalTreeCid) and
|
||||
(a.originalDatasetSize == b.originalDatasetSize) and
|
||||
(a.protectedStrategy == b.protectedStrategy) and
|
||||
(a.verifiable == b.verifiable) and
|
||||
(if a.verifiable:
|
||||
(a.verifyRoot == b.verifyRoot) and
|
||||
(a.slotRoots == b.slotRoots) and
|
||||
(a.cellSize == b.cellSize) and
|
||||
(a.verifiableStrategy == b.verifiableStrategy)
|
||||
else:
|
||||
true)
|
||||
else:
|
||||
true)
|
||||
|
||||
func `$`*(self: Manifest): string =
|
||||
"treeCid: " & $self.treeCid &
|
||||
", datasetSize: " & $self.datasetSize &
|
||||
", blockSize: " & $self.blockSize &
|
||||
", version: " & $self.version &
|
||||
", hcodec: " & $self.hcodec &
|
||||
", codec: " & $self.codec &
|
||||
", protected: " & $self.protected &
|
||||
(if self.protected:
|
||||
", ecK: " & $self.ecK &
|
||||
", ecM: " & $self.ecM &
|
||||
", originalTreeCid: " & $self.originalTreeCid &
|
||||
", originalDatasetSize: " & $self.originalDatasetSize &
|
||||
", verifiable: " & $self.verifiable &
|
||||
(if self.verifiable:
|
||||
", verifyRoot: " & $self.verifyRoot &
|
||||
", slotRoots: " & $self.slotRoots
|
||||
else:
|
||||
"")
|
||||
else:
|
||||
"")
|
||||
|
||||
############################################################
|
||||
# Constructors
|
||||
############################################################
|
||||
|
||||
proc new*(
|
||||
func new*(
|
||||
T: type Manifest,
|
||||
blocks: openArray[Cid] = [],
|
||||
protected = false,
|
||||
version = CIDv1,
|
||||
hcodec = multiCodec("sha2-256"),
|
||||
codec = multiCodec("raw"),
|
||||
blockSize = BlockSize): ?!T =
|
||||
## Create a manifest using array of `Cid`s
|
||||
##
|
||||
|
||||
if hcodec notin EmptyDigests[version]:
|
||||
return failure("Unsupported manifest hash codec!")
|
||||
treeCid: Cid,
|
||||
blockSize: NBytes,
|
||||
datasetSize: NBytes,
|
||||
version: CidVersion = CIDv1,
|
||||
hcodec = Sha256HashCodec,
|
||||
codec = BlockCodec,
|
||||
protected = false): Manifest =
|
||||
|
||||
T(
|
||||
blocks: @blocks,
|
||||
treeCid: treeCid,
|
||||
blockSize: blockSize,
|
||||
datasetSize: datasetSize,
|
||||
version: version,
|
||||
codec: codec,
|
||||
hcodec: hcodec,
|
||||
blockSize: blockSize,
|
||||
originalBytes: blocks.len * blockSize,
|
||||
protected: protected).success
|
||||
protected: protected)
|
||||
|
||||
proc new*(
|
||||
func new*(
|
||||
T: type Manifest,
|
||||
manifest: Manifest,
|
||||
K, M: int): ?!Manifest =
|
||||
treeCid: Cid,
|
||||
datasetSize: NBytes,
|
||||
ecK, ecM: int,
|
||||
strategy = SteppedStrategy): Manifest =
|
||||
## Create an erasure protected dataset from an
|
||||
## un-protected one
|
||||
## unprotected one
|
||||
##
|
||||
|
||||
var
|
||||
self = Manifest(
|
||||
version: manifest.version,
|
||||
codec: manifest.codec,
|
||||
hcodec: manifest.hcodec,
|
||||
originalBytes: manifest.originalBytes,
|
||||
blockSize: manifest.blockSize,
|
||||
protected: true,
|
||||
K: K, M: M,
|
||||
originalCid: ? manifest.cid,
|
||||
originalLen: manifest.len)
|
||||
Manifest(
|
||||
treeCid: treeCid,
|
||||
datasetSize: datasetSize,
|
||||
version: manifest.version,
|
||||
codec: manifest.codec,
|
||||
hcodec: manifest.hcodec,
|
||||
blockSize: manifest.blockSize,
|
||||
protected: true,
|
||||
ecK: ecK, ecM: ecM,
|
||||
originalTreeCid: manifest.treeCid,
|
||||
originalDatasetSize: manifest.datasetSize,
|
||||
protectedStrategy: strategy)
|
||||
|
||||
let
|
||||
encodedLen = self.rounded + (self.steps * M)
|
||||
|
||||
self.blocks = newSeq[Cid](encodedLen)
|
||||
|
||||
# copy original manifest blocks
|
||||
for i in 0..<self.rounded:
|
||||
if i < manifest.len:
|
||||
self.blocks[i] = manifest[i]
|
||||
else:
|
||||
self.blocks[i] = EmptyCid[manifest.version]
|
||||
.catch
|
||||
.get()[manifest.hcodec]
|
||||
.catch
|
||||
.get()
|
||||
|
||||
? self.verify()
|
||||
self.success
|
||||
|
||||
proc new*(
|
||||
func new*(
|
||||
T: type Manifest,
|
||||
data: openArray[byte],
|
||||
decoder = ManifestContainers[$DagPBCodec]): ?!T =
|
||||
Manifest.decode(data, decoder)
|
||||
manifest: Manifest): Manifest =
|
||||
## Create an unprotected dataset from an
|
||||
## erasure protected one
|
||||
##
|
||||
|
||||
Manifest(
|
||||
treeCid: manifest.originalTreeCid,
|
||||
datasetSize: manifest.originalDatasetSize,
|
||||
version: manifest.version,
|
||||
codec: manifest.codec,
|
||||
hcodec: manifest.hcodec,
|
||||
blockSize: manifest.blockSize,
|
||||
protected: false)
|
||||
|
||||
func new*(
|
||||
T: type Manifest,
|
||||
treeCid: Cid,
|
||||
datasetSize: NBytes,
|
||||
blockSize: NBytes,
|
||||
version: CidVersion,
|
||||
hcodec: MultiCodec,
|
||||
codec: MultiCodec,
|
||||
ecK: int,
|
||||
ecM: int,
|
||||
originalTreeCid: Cid,
|
||||
originalDatasetSize: NBytes,
|
||||
strategy = SteppedStrategy): Manifest =
|
||||
|
||||
Manifest(
|
||||
treeCid: treeCid,
|
||||
datasetSize: datasetSize,
|
||||
blockSize: blockSize,
|
||||
version: version,
|
||||
hcodec: hcodec,
|
||||
codec: codec,
|
||||
protected: true,
|
||||
ecK: ecK,
|
||||
ecM: ecM,
|
||||
originalTreeCid: originalTreeCid,
|
||||
originalDatasetSize: originalDatasetSize,
|
||||
protectedStrategy: strategy)
|
||||
|
||||
func new*(
|
||||
T: type Manifest,
|
||||
manifest: Manifest,
|
||||
verifyRoot: Cid,
|
||||
slotRoots: openArray[Cid],
|
||||
cellSize = DefaultCellSize,
|
||||
strategy = LinearStrategy): ?!Manifest =
|
||||
## Create a verifiable dataset from an
|
||||
## protected one
|
||||
##
|
||||
|
||||
if not manifest.protected:
|
||||
return failure newException(
|
||||
CodexError, "Can create verifiable manifest only from protected manifest.")
|
||||
|
||||
if slotRoots.len != manifest.numSlots:
|
||||
return failure newException(
|
||||
CodexError, "Wrong number of slot roots.")
|
||||
|
||||
success Manifest(
|
||||
treeCid: manifest.treeCid,
|
||||
datasetSize: manifest.datasetSize,
|
||||
version: manifest.version,
|
||||
codec: manifest.codec,
|
||||
hcodec: manifest.hcodec,
|
||||
blockSize: manifest.blockSize,
|
||||
protected: true,
|
||||
ecK: manifest.ecK,
|
||||
ecM: manifest.ecM,
|
||||
originalTreeCid: manifest.originalTreeCid,
|
||||
originalDatasetSize: manifest.originalDatasetSize,
|
||||
protectedStrategy: manifest.protectedStrategy,
|
||||
verifiable: true,
|
||||
verifyRoot: verifyRoot,
|
||||
slotRoots: @slotRoots,
|
||||
cellSize: cellSize,
|
||||
verifiableStrategy: strategy)
|
||||
|
||||
func new*(
|
||||
T: type Manifest,
|
||||
data: openArray[byte]): ?!Manifest =
|
||||
## Create a manifest instance from given data
|
||||
##
|
||||
|
||||
Manifest.decode(data)
|
||||
|
|
|
@ -1,44 +0,0 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2022 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
# This module defines Manifest and all related types
|
||||
|
||||
import std/tables
|
||||
import pkg/libp2p
|
||||
import pkg/questionable
|
||||
|
||||
const
|
||||
DagPBCodec* = multiCodec("dag-pb")
|
||||
|
||||
type
|
||||
ManifestCoderType*[codec: static MultiCodec] = object
|
||||
DagPBCoder* = ManifestCoderType[multiCodec("dag-pb")]
|
||||
|
||||
const
|
||||
ManifestContainers* = {
|
||||
$DagPBCodec: DagPBCoder()
|
||||
}.toTable
|
||||
|
||||
type
|
||||
Manifest* = ref object of RootObj
|
||||
rootHash*: ?Cid # Root (tree) hash of the contained data set
|
||||
originalBytes*: int # Exact size of the original (uploaded) file
|
||||
blockSize*: int # Size of each contained block (might not be needed if blocks are len-prefixed)
|
||||
blocks*: seq[Cid] # Block Cid
|
||||
version*: CidVersion # Cid version
|
||||
hcodec*: MultiCodec # Multihash codec
|
||||
codec*: MultiCodec # Data set codec
|
||||
case protected*: bool # Protected datasets have erasure coded info
|
||||
of true:
|
||||
K*: int # Number of blocks to encode
|
||||
M*: int # Number of resulting parity blocks
|
||||
originalCid*: Cid # The original Cid of the dataset being erasure coded
|
||||
originalLen*: int # The length of the original manifest
|
||||
else:
|
||||
discard
|
124
codex/market.nim
124
codex/market.nim
|
@ -1,26 +1,62 @@
|
|||
import pkg/chronos
|
||||
import pkg/upraises
|
||||
import pkg/questionable
|
||||
import pkg/ethers/erc20
|
||||
import ./contracts/requests
|
||||
import ./contracts/proofs
|
||||
import ./clock
|
||||
import ./errors
|
||||
import ./periods
|
||||
|
||||
export chronos
|
||||
export questionable
|
||||
export requests
|
||||
export proofs
|
||||
export SecondsSince1970
|
||||
export periods
|
||||
|
||||
type
|
||||
Market* = ref object of RootObj
|
||||
MarketError* = object of CodexError
|
||||
Subscription* = ref object of RootObj
|
||||
OnRequest* = proc(id: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].}
|
||||
OnRequest* = proc(id: RequestId,
|
||||
ask: StorageAsk,
|
||||
expiry: UInt256) {.gcsafe, upraises:[].}
|
||||
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
|
||||
OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
|
||||
OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
|
||||
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
|
||||
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
|
||||
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].}
|
||||
PastStorageRequest* = object
|
||||
requestId*: RequestId
|
||||
ask*: StorageAsk
|
||||
expiry*: UInt256
|
||||
ProofChallenge* = array[32, byte]
|
||||
|
||||
method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getSigner*(market: Market): Future[Address] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method proofTimeout*(market: Market): Future[UInt256] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
|
||||
let downtime = await market.proofDowntime
|
||||
let pntr = await market.getPointer(slotId)
|
||||
return pntr < downtime
|
||||
|
||||
method requestStorage*(market: Market,
|
||||
request: StorageRequest) {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
@ -28,28 +64,49 @@ method requestStorage*(market: Market,
|
|||
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getRequest*(market: Market,
|
||||
id: RequestId):
|
||||
Future[?StorageRequest] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getState*(market: Market,
|
||||
method requestState*(market: Market,
|
||||
requestId: RequestId): Future[?RequestState] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method slotState*(market: Market,
|
||||
slotId: SlotId): Future[SlotState] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getRequestEnd*(market: Market,
|
||||
id: RequestId): Future[SecondsSince1970] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method requestExpiresAt*(market: Market,
|
||||
id: RequestId): Future[SecondsSince1970] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getHost*(market: Market,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256): Future[?Address] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getActiveSlot*(
|
||||
market: Market,
|
||||
slotId: SlotId): Future[?Slot] {.base, async.} =
|
||||
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method fillSlot*(market: Market,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256,
|
||||
proof: seq[byte]) {.base, async.} =
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256) {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method withdrawFunds*(market: Market,
|
||||
|
@ -61,12 +118,48 @@ method subscribeRequests*(market: Market,
|
|||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method isProofRequired*(market: Market,
|
||||
id: SlotId): Future[bool] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method willProofBeRequired*(market: Market,
|
||||
id: SlotId): Future[bool] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method submitProof*(market: Market,
|
||||
id: SlotId,
|
||||
proof: Groth16Proof) {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method markProofAsMissing*(market: Market,
|
||||
id: SlotId,
|
||||
period: Period) {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method canProofBeMarkedAsMissing*(market: Market,
|
||||
id: SlotId,
|
||||
period: Period): Future[bool] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeFulfillment*(market: Market,
|
||||
callback: OnFulfillment):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeFulfillment*(market: Market,
|
||||
requestId: RequestId,
|
||||
callback: OnFulfillment):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeSlotFilled*(market: Market,
|
||||
callback: OnSlotFilled):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeSlotFilled*(market: Market,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256,
|
||||
|
@ -74,17 +167,42 @@ method subscribeSlotFilled*(market: Market,
|
|||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeSlotFreed*(market: Market,
|
||||
callback: OnSlotFreed):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeRequestCancelled*(market: Market,
|
||||
callback: OnRequestCancelled):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeRequestCancelled*(market: Market,
|
||||
requestId: RequestId,
|
||||
callback: OnRequestCancelled):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeRequestFailed*(market: Market,
|
||||
callback: OnRequestFailed):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeRequestFailed*(market: Market,
|
||||
requestId: RequestId,
|
||||
callback: OnRequestFailed):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeProofSubmission*(market: Market,
|
||||
callback: OnProofSubmitted):
|
||||
Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method queryPastStorageRequests*(market: Market,
|
||||
blocksAgo: int):
|
||||
Future[seq[PastStorageRequest]] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
|
|
@ -0,0 +1,10 @@
|
|||
import ./merkletree/merkletree
|
||||
import ./merkletree/codex
|
||||
import ./merkletree/poseidon2
|
||||
|
||||
export codex, poseidon2, merkletree
|
||||
|
||||
type
|
||||
SomeMerkleTree* = ByteTree | CodexTree | Poseidon2Tree
|
||||
SomeMerkleProof* = ByteProof | CodexProof | Poseidon2Proof
|
||||
SomeMerkleHash* = ByteHash | Poseidon2Hash
|
|
@ -0,0 +1,4 @@
|
|||
import ./codex/codex
|
||||
import ./codex/coders
|
||||
|
||||
export codex, coders
|
|
@ -0,0 +1,119 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2023 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push: {.upraises: [].}
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/byteutils
|
||||
import pkg/serde/json
|
||||
|
||||
import ../../units
|
||||
import ../../errors
|
||||
|
||||
import ./codex
|
||||
|
||||
const MaxMerkleTreeSize = 100.MiBs.uint
|
||||
const MaxMerkleProofSize = 1.MiBs.uint
|
||||
|
||||
proc encode*(self: CodexTree): seq[byte] =
|
||||
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
pb.write(1, self.mcodec.uint64)
|
||||
pb.write(2, self.leavesCount.uint64)
|
||||
for node in self.nodes:
|
||||
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
nodesPb.write(1, node)
|
||||
nodesPb.finish()
|
||||
pb.write(3, nodesPb)
|
||||
|
||||
pb.finish
|
||||
pb.buffer
|
||||
|
||||
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
|
||||
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
|
||||
var mcodecCode: uint64
|
||||
var leavesCount: uint64
|
||||
discard ? pb.getField(1, mcodecCode).mapFailure
|
||||
discard ? pb.getField(2, leavesCount).mapFailure
|
||||
|
||||
let mcodec = MultiCodec.codec(mcodecCode.int)
|
||||
if mcodec == InvalidMultiCodec:
|
||||
return failure("Invalid MultiCodec code " & $mcodecCode)
|
||||
|
||||
var
|
||||
nodesBuff: seq[seq[byte]]
|
||||
nodes: seq[ByteHash]
|
||||
|
||||
if ? pb.getRepeatedField(3, nodesBuff).mapFailure:
|
||||
for nodeBuff in nodesBuff:
|
||||
var node: ByteHash
|
||||
discard ? initProtoBuffer(nodeBuff).getField(1, node).mapFailure
|
||||
nodes.add node
|
||||
|
||||
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
|
||||
|
||||
proc encode*(self: CodexProof): seq[byte] =
|
||||
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
|
||||
pb.write(1, self.mcodec.uint64)
|
||||
pb.write(2, self.index.uint64)
|
||||
pb.write(3, self.nleaves.uint64)
|
||||
|
||||
for node in self.path:
|
||||
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
nodesPb.write(1, node)
|
||||
nodesPb.finish()
|
||||
pb.write(4, nodesPb)
|
||||
|
||||
pb.finish
|
||||
pb.buffer
|
||||
|
||||
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
|
||||
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
|
||||
var mcodecCode: uint64
|
||||
var index: uint64
|
||||
var nleaves: uint64
|
||||
discard ? pb.getField(1, mcodecCode).mapFailure
|
||||
|
||||
let mcodec = MultiCodec.codec(mcodecCode.int)
|
||||
if mcodec == InvalidMultiCodec:
|
||||
return failure("Invalid MultiCodec code " & $mcodecCode)
|
||||
|
||||
discard ? pb.getField(2, index).mapFailure
|
||||
discard ? pb.getField(3, nleaves).mapFailure
|
||||
|
||||
var
|
||||
nodesBuff: seq[seq[byte]]
|
||||
nodes: seq[ByteHash]
|
||||
|
||||
if ? pb.getRepeatedField(4, nodesBuff).mapFailure:
|
||||
for nodeBuff in nodesBuff:
|
||||
var node: ByteHash
|
||||
let nodePb = initProtoBuffer(nodeBuff)
|
||||
discard ? nodePb.getField(1, node).mapFailure
|
||||
nodes.add node
|
||||
|
||||
CodexProof.init(mcodec, index.int, nleaves.int, nodes)
|
||||
|
||||
proc fromJson*(
|
||||
_: type CodexProof,
|
||||
json: JsonNode
|
||||
): ?!CodexProof =
|
||||
expectJsonKind(Cid, JString, json)
|
||||
var bytes: seq[byte]
|
||||
try:
|
||||
bytes = hexToSeqByte(json.str)
|
||||
except ValueError as err:
|
||||
return failure(err)
|
||||
|
||||
CodexProof.decode(bytes)
|
||||
|
||||
func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode())
|
|
@ -0,0 +1,255 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2023 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/bitops
|
||||
import std/sequtils
|
||||
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/libp2p/[cid, multicodec, multihash]
|
||||
|
||||
import ../../utils
|
||||
import ../../rng
|
||||
import ../../errors
|
||||
import ../../blocktype
|
||||
|
||||
from ../../utils/digest import digestBytes
|
||||
|
||||
import ../merkletree
|
||||
|
||||
export merkletree
|
||||
|
||||
logScope:
|
||||
topics = "codex merkletree"
|
||||
|
||||
type
|
||||
ByteTreeKey* {.pure.} = enum
|
||||
KeyNone = 0x0.byte
|
||||
KeyBottomLayer = 0x1.byte
|
||||
KeyOdd = 0x2.byte
|
||||
KeyOddAndBottomLayer = 0x3.byte
|
||||
|
||||
ByteHash* = seq[byte]
|
||||
ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
|
||||
ByteProof* = MerkleProof[ByteHash, ByteTreeKey]
|
||||
|
||||
CodexTree* = ref object of ByteTree
|
||||
mcodec*: MultiCodec
|
||||
|
||||
CodexProof* = ref object of ByteProof
|
||||
mcodec*: MultiCodec
|
||||
|
||||
func mhash*(mcodec: MultiCodec): ?!MHash =
|
||||
let
|
||||
mhash = CodeHashes.getOrDefault(mcodec)
|
||||
|
||||
if isNil(mhash.coder):
|
||||
return failure "Invalid multihash codec"
|
||||
|
||||
success mhash
|
||||
|
||||
func digestSize*(self: (CodexTree or CodexProof)): int =
|
||||
## Number of leaves
|
||||
##
|
||||
|
||||
self.mhash.size
|
||||
|
||||
func getProof*(self: CodexTree, index: int): ?!CodexProof =
|
||||
var
|
||||
proof = CodexProof(mcodec: self.mcodec)
|
||||
|
||||
? self.getProof(index, proof)
|
||||
|
||||
success proof
|
||||
|
||||
func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
|
||||
## Verify hash
|
||||
##
|
||||
|
||||
let
|
||||
rootBytes = root.digestBytes
|
||||
leafBytes = leaf.digestBytes
|
||||
|
||||
if self.mcodec != root.mcodec or
|
||||
self.mcodec != leaf.mcodec:
|
||||
return failure "Hash codec mismatch"
|
||||
|
||||
if rootBytes.len != root.size and
|
||||
leafBytes.len != leaf.size:
|
||||
return failure "Invalid hash length"
|
||||
|
||||
self.verify(leafBytes, rootBytes)
|
||||
|
||||
func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
|
||||
self.verify(? leaf.mhash.mapFailure, ? leaf.mhash.mapFailure)
|
||||
|
||||
proc rootCid*(
|
||||
self: CodexTree,
|
||||
version = CIDv1,
|
||||
dataCodec = DatasetRootCodec): ?!Cid =
|
||||
|
||||
if (? self.root).len == 0:
|
||||
return failure "Empty root"
|
||||
|
||||
let
|
||||
mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
|
||||
|
||||
Cid.init(version, DatasetRootCodec, mhash).mapFailure
|
||||
|
||||
func getLeafCid*(
|
||||
self: CodexTree,
|
||||
i: Natural,
|
||||
version = CIDv1,
|
||||
dataCodec = BlockCodec): ?!Cid =
|
||||
|
||||
if i >= self.leavesCount:
|
||||
return failure "Invalid leaf index " & $i
|
||||
|
||||
let
|
||||
leaf = self.leaves[i]
|
||||
mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure
|
||||
|
||||
Cid.init(version, dataCodec, mhash).mapFailure
|
||||
|
||||
proc `$`*(self: CodexTree): string =
|
||||
let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
|
||||
"CodexTree(" &
|
||||
" root: " & root &
|
||||
", leavesCount: " & $self.leavesCount &
|
||||
", levels: " & $self.levels &
|
||||
", mcodec: " & $self.mcodec & " )"
|
||||
|
||||
proc `$`*(self: CodexProof): string =
|
||||
"CodexProof(" &
|
||||
" nleaves: " & $self.nleaves &
|
||||
", index: " & $self.index &
|
||||
", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
|
||||
", mcodec: " & $self.mcodec & " )"
|
||||
|
||||
func compress*(
|
||||
x, y: openArray[byte],
|
||||
key: ByteTreeKey,
|
||||
mhash: MHash): ?!ByteHash =
|
||||
## Compress two hashes
|
||||
##
|
||||
|
||||
var digest = newSeq[byte](mhash.size)
|
||||
mhash.coder(@x & @y & @[ key.byte ], digest)
|
||||
success digest
|
||||
|
||||
func init*(
|
||||
_: type CodexTree,
|
||||
mcodec: MultiCodec = Sha256HashCodec,
|
||||
leaves: openArray[ByteHash]): ?!CodexTree =
|
||||
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
let
|
||||
mhash = ? mcodec.mhash()
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
Zero: ByteHash = newSeq[byte](mhash.size)
|
||||
|
||||
if mhash.size != leaves[0].len:
|
||||
return failure "Invalid hash length"
|
||||
|
||||
var
|
||||
self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
|
||||
|
||||
self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
|
||||
success self
|
||||
|
||||
func init*(
|
||||
_: type CodexTree,
|
||||
leaves: openArray[MultiHash]): ?!CodexTree =
|
||||
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
let
|
||||
mcodec = leaves[0].mcodec
|
||||
leaves = leaves.mapIt( it.digestBytes )
|
||||
|
||||
CodexTree.init(mcodec, leaves)
|
||||
|
||||
func init*(
|
||||
_: type CodexTree,
|
||||
leaves: openArray[Cid]): ?!CodexTree =
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
let
|
||||
mcodec = (? leaves[0].mhash.mapFailure).mcodec
|
||||
leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes )
|
||||
|
||||
CodexTree.init(mcodec, leaves)
|
||||
|
||||
proc fromNodes*(
|
||||
_: type CodexTree,
|
||||
mcodec: MultiCodec = Sha256HashCodec,
|
||||
nodes: openArray[ByteHash],
|
||||
nleaves: int): ?!CodexTree =
|
||||
|
||||
if nodes.len == 0:
|
||||
return failure "Empty nodes"
|
||||
|
||||
let
|
||||
mhash = ? mcodec.mhash()
|
||||
Zero = newSeq[byte](mhash.size)
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
|
||||
if mhash.size != nodes[0].len:
|
||||
return failure "Invalid hash length"
|
||||
|
||||
var
|
||||
self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec)
|
||||
layer = nleaves
|
||||
pos = 0
|
||||
|
||||
while pos < nodes.len:
|
||||
self.layers.add( nodes[pos..<(pos + layer)] )
|
||||
pos += layer
|
||||
layer = divUp(layer, 2)
|
||||
|
||||
let
|
||||
index = Rng.instance.rand(nleaves - 1)
|
||||
proof = ? self.getProof(index)
|
||||
|
||||
if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
|
||||
return failure "Unable to verify tree built from nodes"
|
||||
|
||||
success self
|
||||
|
||||
func init*(
|
||||
_: type CodexProof,
|
||||
mcodec: MultiCodec = Sha256HashCodec,
|
||||
index: int,
|
||||
nleaves: int,
|
||||
nodes: openArray[ByteHash]): ?!CodexProof =
|
||||
|
||||
if nodes.len == 0:
|
||||
return failure "Empty nodes"
|
||||
|
||||
let
|
||||
mhash = ? mcodec.mhash()
|
||||
Zero = newSeq[byte](mhash.size)
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
|
||||
success CodexProof(
|
||||
compress: compressor,
|
||||
zero: Zero,
|
||||
mcodec: mcodec,
|
||||
index: index,
|
||||
nleaves: nleaves,
|
||||
path: @nodes)
|
|
@ -0,0 +1,153 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2023 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/bitops
|
||||
|
||||
import pkg/questionable/results
|
||||
|
||||
import ../errors
|
||||
|
||||
type
|
||||
CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
|
||||
|
||||
MerkleTree*[H, K] = ref object of RootObj
|
||||
layers* : seq[seq[H]]
|
||||
compress*: CompressFn[H, K]
|
||||
zero* : H
|
||||
|
||||
MerkleProof*[H, K] = ref object of RootObj
|
||||
index* : int # linear index of the leaf, starting from 0
|
||||
path* : seq[H] # order: from the bottom to the top
|
||||
nleaves* : int # number of leaves in the tree (=size of input)
|
||||
compress*: CompressFn[H, K] # compress function
|
||||
zero* : H # zero value
|
||||
|
||||
func depth*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layers.len - 1
|
||||
|
||||
func leavesCount*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layers[0].len
|
||||
|
||||
func levels*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layers.len
|
||||
|
||||
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] =
|
||||
return self.layers[0]
|
||||
|
||||
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] =
|
||||
for layer in self.layers:
|
||||
yield layer
|
||||
|
||||
iterator nodes*[H, K](self: MerkleTree[H, K]): H =
|
||||
for layer in self.layers:
|
||||
for node in layer:
|
||||
yield node
|
||||
|
||||
func root*[H, K](self: MerkleTree[H, K]): ?!H =
|
||||
let last = self.layers[^1]
|
||||
if last.len != 1:
|
||||
return failure "invalid tree"
|
||||
|
||||
return success last[0]
|
||||
|
||||
func getProof*[H, K](
|
||||
self: MerkleTree[H, K],
|
||||
index: int,
|
||||
proof: MerkleProof[H, K]): ?!void =
|
||||
let depth = self.depth
|
||||
let nleaves = self.leavesCount
|
||||
|
||||
if not (index >= 0 and index < nleaves):
|
||||
return failure "index out of bounds"
|
||||
|
||||
var path : seq[H] = newSeq[H](depth)
|
||||
var k = index
|
||||
var m = nleaves
|
||||
for i in 0..<depth:
|
||||
let j = k xor 1
|
||||
path[i] = if (j < m): self.layers[i][j] else: self.zero
|
||||
k = k shr 1
|
||||
m = (m + 1) shr 1
|
||||
|
||||
proof.index = index
|
||||
proof.path = path
|
||||
proof.nleaves = nleaves
|
||||
proof.compress = self.compress
|
||||
|
||||
success()
|
||||
|
||||
func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
|
||||
var
|
||||
proof = MerkleProof[H, K]()
|
||||
|
||||
? self.getProof(index, proof)
|
||||
|
||||
success proof
|
||||
|
||||
func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
|
||||
var
|
||||
m = proof.nleaves
|
||||
j = proof.index
|
||||
h = leaf
|
||||
bottomFlag = K.KeyBottomLayer
|
||||
|
||||
for p in proof.path:
|
||||
let oddIndex : bool = (bitand(j,1) != 0)
|
||||
if oddIndex:
|
||||
# the index of the child is odd, so the node itself can't be odd (a bit counterintuitive, yeah :)
|
||||
h = ? proof.compress( p, h, bottomFlag )
|
||||
else:
|
||||
if j == m - 1:
|
||||
# single child => odd node
|
||||
h = ? proof.compress( h, p, K(bottomFlag.ord + 2) )
|
||||
else:
|
||||
# even node
|
||||
h = ? proof.compress( h , p, bottomFlag )
|
||||
bottomFlag = K.KeyNone
|
||||
j = j shr 1
|
||||
m = (m+1) shr 1
|
||||
|
||||
return success h
|
||||
|
||||
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
|
||||
success bool(root == ? proof.reconstructRoot(leaf))
|
||||
|
||||
func merkleTreeWorker*[H, K](
|
||||
self: MerkleTree[H, K],
|
||||
xs: openArray[H],
|
||||
isBottomLayer: static bool): ?!seq[seq[H]] =
|
||||
|
||||
let a = low(xs)
|
||||
let b = high(xs)
|
||||
let m = b - a + 1
|
||||
|
||||
when not isBottomLayer:
|
||||
if m == 1:
|
||||
return success @[ @xs ]
|
||||
|
||||
let halfn: int = m div 2
|
||||
let n : int = 2 * halfn
|
||||
let isOdd: bool = (n != m)
|
||||
|
||||
var ys: seq[H]
|
||||
if not isOdd:
|
||||
ys = newSeq[H](halfn)
|
||||
else:
|
||||
ys = newSeq[H](halfn + 1)
|
||||
|
||||
for i in 0..<halfn:
|
||||
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
|
||||
ys[i] = ? self.compress( xs[a + 2 * i], xs[a + 2 * i + 1], key = key )
|
||||
if isOdd:
|
||||
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
|
||||
ys[halfn] = ? self.compress( xs[n], self.zero, key = key )
|
||||
|
||||
success @[ @xs ] & ? self.merkleTreeWorker(ys, isBottomLayer = false)
|
|
@ -0,0 +1,148 @@
|
|||
## Nim-Codex
|
||||
## Copyright (c) 2023 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/sequtils
|
||||
|
||||
import pkg/poseidon2
|
||||
import pkg/constantine/math/io/io_fields
|
||||
import pkg/constantine/platforms/abstractions
|
||||
import pkg/questionable/results
|
||||
|
||||
import ../utils
|
||||
import ../rng
|
||||
|
||||
import ./merkletree
|
||||
|
||||
export merkletree, poseidon2
|
||||
|
||||
const
|
||||
KeyNoneF = F.fromhex("0x0")
|
||||
KeyBottomLayerF = F.fromhex("0x1")
|
||||
KeyOddF = F.fromhex("0x2")
|
||||
KeyOddAndBottomLayerF = F.fromhex("0x3")
|
||||
|
||||
Poseidon2Zero* = zero
|
||||
|
||||
type
|
||||
Bn254Fr* = F
|
||||
Poseidon2Hash* = Bn254Fr
|
||||
|
||||
PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
|
||||
KeyNone
|
||||
KeyBottomLayer
|
||||
KeyOdd
|
||||
KeyOddAndBottomLayer
|
||||
|
||||
Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
|
||||
Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]
|
||||
|
||||
proc `$`*(self: Poseidon2Tree): string =
|
||||
let root = if self.root.isOk: self.root.get.toHex else: "none"
|
||||
"Poseidon2Tree(" &
|
||||
" root: " & root &
|
||||
", leavesCount: " & $self.leavesCount &
|
||||
", levels: " & $self.levels & " )"
|
||||
|
||||
proc `$`*(self: Poseidon2Proof): string =
|
||||
"Poseidon2Proof(" &
|
||||
" nleaves: " & $self.nleaves &
|
||||
", index: " & $self.index &
|
||||
", path: " & $self.path.mapIt( it.toHex ) & " )"
|
||||
|
||||
func toArray32*(bytes: openArray[byte]): array[32, byte] =
|
||||
result[0..<bytes.len] = bytes[0..<bytes.len]
|
||||
|
||||
converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
|
||||
case key:
|
||||
of KeyNone: KeyNoneF
|
||||
of KeyBottomLayer: KeyBottomLayerF
|
||||
of KeyOdd: KeyOddF
|
||||
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
|
||||
|
||||
func init*(
|
||||
_: type Poseidon2Tree,
|
||||
leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
|
||||
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
let
|
||||
compressor = proc(
|
||||
x, y: Poseidon2Hash,
|
||||
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
|
||||
success compress( x, y, key.toKey )
|
||||
|
||||
var
|
||||
self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
|
||||
|
||||
self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
|
||||
success self
|
||||
|
||||
func init*(
|
||||
_: type Poseidon2Tree,
|
||||
leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
|
||||
Poseidon2Tree.init(
|
||||
leaves.mapIt( Poseidon2Hash.fromBytes(it) ))
|
||||
|
||||
proc fromNodes*(
|
||||
_: type Poseidon2Tree,
|
||||
nodes: openArray[Poseidon2Hash],
|
||||
nleaves: int): ?!Poseidon2Tree =
|
||||
|
||||
if nodes.len == 0:
|
||||
return failure "Empty nodes"
|
||||
|
||||
let
|
||||
compressor = proc(
|
||||
x, y: Poseidon2Hash,
|
||||
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
|
||||
success compress( x, y, key.toKey )
|
||||
|
||||
var
|
||||
self = Poseidon2Tree(compress: compressor, zero: zero)
|
||||
layer = nleaves
|
||||
pos = 0
|
||||
|
||||
while pos < nodes.len:
|
||||
self.layers.add( nodes[pos..<(pos + layer)] )
|
||||
pos += layer
|
||||
layer = divUp(layer, 2)
|
||||
|
||||
let
|
||||
index = Rng.instance.rand(nleaves - 1)
|
||||
proof = ? self.getProof(index)
|
||||
|
||||
if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
|
||||
return failure "Unable to verify tree built from nodes"
|
||||
|
||||
success self
|
||||
|
||||
func init*(
|
||||
_: type Poseidon2Proof,
|
||||
index: int,
|
||||
nleaves: int,
|
||||
nodes: openArray[Poseidon2Hash]): ?!Poseidon2Proof =
|
||||
|
||||
if nodes.len == 0:
|
||||
return failure "Empty nodes"
|
||||
|
||||
let
|
||||
compressor = proc(
|
||||
x, y: Poseidon2Hash,
|
||||
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
|
||||
success compress( x, y, key.toKey )
|
||||
|
||||
success Poseidon2Proof(
|
||||
compress: compressor,
|
||||
zero: Poseidon2Zero,
|
||||
index: index,
|
||||
nleaves: nleaves,
|
||||
path: @nodes)
|
|
@ -7,13 +7,18 @@
|
|||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import std/os
|
||||
|
||||
const
|
||||
CodexRepoNamespace* = "/repo" # repository namespace, blocks and manifests are subkeys
|
||||
CodexBlocksNamespace* = CodexRepoNamespace / "blocks" # blocks namespace
|
||||
CodexManifestNamespace* = CodexRepoNamespace / "manifests" # manifest namespace
|
||||
CodexBlocksPersistNamespace* = # Cid's of persisted blocks goes here
|
||||
CodexMetaNamespace / "blocks" / "persist"
|
||||
# Namespaces
|
||||
CodexMetaNamespace* = "meta" # meta info stored here
|
||||
CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
|
||||
CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
|
||||
CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
|
||||
CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
|
||||
CodexBlocksTtlNamespace* = # Cid TTL
|
||||
CodexMetaNamespace / "blocks" / "ttl"
|
||||
CodexMetaNamespace & "/ttl"
|
||||
CodexBlockProofNamespace* = # Cid and Proof
|
||||
CodexMetaNamespace & "/proof"
|
||||
CodexDhtNamespace* = "dht" # Dht namespace
|
||||
CodexDhtProvidersNamespace* = # Dht providers namespace
|
||||
CodexDhtNamespace & "/providers"
|
||||
CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
|
||||
|
|
925
codex/node.nim
925
codex/node.nim
File diff suppressed because it is too large
Load Diff
|
@ -1,79 +0,0 @@
|
|||
import std/sets
|
||||
import pkg/upraises
|
||||
import pkg/questionable
|
||||
import pkg/chronicles
|
||||
import ./storageproofs
|
||||
import ./clock
|
||||
|
||||
export sets
|
||||
export storageproofs
|
||||
|
||||
type
|
||||
Proving* = ref object
|
||||
proofs: Proofs
|
||||
clock: Clock
|
||||
loop: ?Future[void]
|
||||
slots*: HashSet[SlotId]
|
||||
onProofRequired: ?OnProofRequired
|
||||
OnProofRequired* = proc (id: SlotId) {.gcsafe, upraises:[].}
|
||||
|
||||
func new*(_: type Proving, proofs: Proofs, clock: Clock): Proving =
|
||||
Proving(proofs: proofs, clock: clock)
|
||||
|
||||
proc `onProofRequired=`*(proving: Proving, callback: OnProofRequired) =
|
||||
proving.onProofRequired = some callback
|
||||
|
||||
func add*(proving: Proving, id: SlotId) =
|
||||
proving.slots.incl(id)
|
||||
|
||||
proc getCurrentPeriod(proving: Proving): Future[Period] {.async.} =
|
||||
let periodicity = await proving.proofs.periodicity()
|
||||
return periodicity.periodOf(proving.clock.now().u256)
|
||||
|
||||
proc waitUntilPeriod(proving: Proving, period: Period) {.async.} =
|
||||
let periodicity = await proving.proofs.periodicity()
|
||||
await proving.clock.waitUntil(periodicity.periodStart(period).truncate(int64))
|
||||
|
||||
proc removeEndedContracts(proving: Proving) {.async.} =
|
||||
let now = proving.clock.now().u256
|
||||
var ended: HashSet[SlotId]
|
||||
for id in proving.slots:
|
||||
if now >= (await proving.proofs.getProofEnd(id)):
|
||||
ended.incl(id)
|
||||
proving.slots.excl(ended)
|
||||
|
||||
proc run(proving: Proving) {.async.} =
|
||||
try:
|
||||
while true:
|
||||
let currentPeriod = await proving.getCurrentPeriod()
|
||||
await proving.removeEndedContracts()
|
||||
for id in proving.slots:
|
||||
if (await proving.proofs.isProofRequired(id)) or
|
||||
(await proving.proofs.willProofBeRequired(id)):
|
||||
if callback =? proving.onProofRequired:
|
||||
callback(id)
|
||||
await proving.waitUntilPeriod(currentPeriod + 1)
|
||||
except CancelledError:
|
||||
discard
|
||||
except CatchableError as e:
|
||||
error "Proving failed", msg = e.msg
|
||||
|
||||
proc start*(proving: Proving) {.async.} =
|
||||
if proving.loop.isSome:
|
||||
return
|
||||
|
||||
proving.loop = some proving.run()
|
||||
|
||||
proc stop*(proving: Proving) {.async.} =
|
||||
if loop =? proving.loop:
|
||||
proving.loop = Future[void].none
|
||||
if not loop.finished:
|
||||
await loop.cancelAndWait()
|
||||
|
||||
proc submitProof*(proving: Proving, id: SlotId, proof: seq[byte]) {.async.} =
|
||||
await proving.proofs.submitProof(id, proof)
|
||||
|
||||
proc subscribeProofSubmission*(proving: Proving,
|
||||
callback: OnProofSubmitted):
|
||||
Future[Subscription] =
|
||||
proving.proofs.subscribeProofSubmission(callback)
|
|
@ -18,18 +18,15 @@ type
|
|||
clock: Clock
|
||||
purchases: Table[PurchaseId, Purchase]
|
||||
proofProbability*: UInt256
|
||||
requestExpiryInterval*: UInt256
|
||||
PurchaseTimeout* = Timeout
|
||||
|
||||
const DefaultProofProbability = 100.u256
|
||||
const DefaultRequestExpiryInterval = (10 * 60).u256
|
||||
|
||||
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
|
||||
Purchasing(
|
||||
market: market,
|
||||
clock: clock,
|
||||
proofProbability: DefaultProofProbability,
|
||||
requestExpiryInterval: DefaultRequestExpiryInterval,
|
||||
)
|
||||
|
||||
proc load*(purchasing: Purchasing) {.async.} =
|
||||
|
@ -47,12 +44,11 @@ proc stop*(purchasing: Purchasing) {.async.} =
|
|||
discard
|
||||
|
||||
proc populate*(purchasing: Purchasing,
|
||||
request: StorageRequest): Future[StorageRequest] {.async.} =
|
||||
request: StorageRequest
|
||||
): Future[StorageRequest] {.async.} =
|
||||
result = request
|
||||
if result.ask.proofProbability == 0.u256:
|
||||
result.ask.proofProbability = purchasing.proofProbability
|
||||
if result.expiry == 0.u256:
|
||||
result.expiry = (purchasing.clock.now().u256 + purchasing.requestExpiryInterval)
|
||||
if result.nonce == Nonce.default:
|
||||
var id = result.nonce.toArray
|
||||
doAssert randomBytes(id) == 32
|
||||
|
@ -60,7 +56,8 @@ proc populate*(purchasing: Purchasing,
|
|||
result.client = await purchasing.market.getSigner()
|
||||
|
||||
proc purchase*(purchasing: Purchasing,
|
||||
request: StorageRequest): Future[Purchase] {.async.} =
|
||||
request: StorageRequest
|
||||
): Future[Purchase] {.async.} =
|
||||
let request = await purchasing.populate(request)
|
||||
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
|
||||
purchase.start()
|
||||
|
@ -72,3 +69,10 @@ func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase =
|
|||
some purchasing.purchases[id]
|
||||
else:
|
||||
none Purchase
|
||||
|
||||
func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
|
||||
var pIds: seq[PurchaseId] = @[]
|
||||
for key in purchasing.purchases.keys:
|
||||
pIds.add(key)
|
||||
return pIds
|
||||
|
||||
|
|
|
@ -24,30 +24,39 @@ export Purchase
|
|||
export purchaseid
|
||||
export statemachine
|
||||
|
||||
func new*(_: type Purchase,
|
||||
requestId: RequestId,
|
||||
market: Market,
|
||||
clock: Clock): Purchase =
|
||||
Purchase(
|
||||
future: Future[void].new(),
|
||||
requestId: requestId,
|
||||
market: market,
|
||||
clock: clock
|
||||
)
|
||||
func new*(
|
||||
_: type Purchase,
|
||||
requestId: RequestId,
|
||||
market: Market,
|
||||
clock: Clock
|
||||
): Purchase =
|
||||
## create a new instance of a Purchase
|
||||
##
|
||||
var purchase = Purchase.new()
|
||||
{.cast(noSideEffect).}:
|
||||
purchase.future = newFuture[void]()
|
||||
purchase.requestId = requestId
|
||||
purchase.market = market
|
||||
purchase.clock = clock
|
||||
|
||||
func new*(_: type Purchase,
|
||||
request: StorageRequest,
|
||||
market: Market,
|
||||
clock: Clock): Purchase =
|
||||
return purchase
|
||||
|
||||
func new*(
|
||||
_: type Purchase,
|
||||
request: StorageRequest,
|
||||
market: Market,
|
||||
clock: Clock
|
||||
): Purchase =
|
||||
## Create a new purchase using the given market and clock
|
||||
let purchase = Purchase.new(request.id, market, clock)
|
||||
purchase.request = some request
|
||||
return purchase
|
||||
|
||||
proc start*(purchase: Purchase) =
|
||||
purchase.switch(PurchasePending())
|
||||
purchase.start(PurchasePending())
|
||||
|
||||
proc load*(purchase: Purchase) =
|
||||
purchase.switch(PurchaseUnknown())
|
||||
purchase.start(PurchaseUnknown())
|
||||
|
||||
proc wait*(purchase: Purchase) {.async.} =
|
||||
await purchase.future
|
||||
|
@ -63,3 +72,8 @@ func error*(purchase: Purchase): ?(ref CatchableError) =
|
|||
some purchase.future.error
|
||||
else:
|
||||
none (ref CatchableError)
|
||||
|
||||
func state*(purchase: Purchase): ?string =
|
||||
proc description(state: State): string =
|
||||
$state
|
||||
purchase.query(description)
|
||||
|
|
|
@ -1,8 +1,12 @@
|
|||
import std/hashes
|
||||
import pkg/nimcrypto
|
||||
import ../logutils
|
||||
|
||||
type PurchaseId* = distinct array[32, byte]
|
||||
|
||||
logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog
|
||||
logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog
|
||||
|
||||
proc hash*(x: PurchaseId): Hash {.borrow.}
|
||||
proc `==`*(x, y: PurchaseId): bool {.borrow.}
|
||||
proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex
|
||||
|
|
|
@ -1,21 +1,18 @@
|
|||
import ../utils/statemachine
|
||||
import ../utils/asyncstatemachine
|
||||
import ../market
|
||||
import ../clock
|
||||
import ../errors
|
||||
|
||||
export market
|
||||
export clock
|
||||
export statemachine
|
||||
export asyncstatemachine
|
||||
|
||||
type
|
||||
Purchase* = ref object of StateMachine
|
||||
Purchase* = ref object of Machine
|
||||
future*: Future[void]
|
||||
market*: Market
|
||||
clock*: Clock
|
||||
requestId*: RequestId
|
||||
request*: ?StorageRequest
|
||||
PurchaseState* = ref object of AsyncState
|
||||
PurchaseState* = ref object of State
|
||||
PurchaseError* = object of CodexError
|
||||
|
||||
method description*(state: PurchaseState): string {.base.} =
|
||||
raiseAssert "description not implemented for state"
|
||||
|
|
|
@ -1,20 +1,25 @@
|
|||
import pkg/metrics
|
||||
|
||||
import ../../logutils
|
||||
import ../statemachine
|
||||
import ./error
|
||||
import ./errorhandling
|
||||
|
||||
type PurchaseCancelled* = ref object of PurchaseState
|
||||
declareCounter(codex_purchases_cancelled, "codex purchases cancelled")
|
||||
|
||||
method enterAsync*(state: PurchaseCancelled) {.async.} =
|
||||
without purchase =? (state.context as Purchase):
|
||||
raiseAssert "invalid state"
|
||||
logScope:
|
||||
topics = "marketplace purchases cancelled"
|
||||
|
||||
try:
|
||||
await purchase.market.withdrawFunds(purchase.requestId)
|
||||
except CatchableError as error:
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
return
|
||||
type PurchaseCancelled* = ref object of ErrorHandlingState
|
||||
|
||||
method `$`*(state: PurchaseCancelled): string =
|
||||
"cancelled"
|
||||
|
||||
method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_cancelled.inc()
|
||||
let purchase = Purchase(machine)
|
||||
|
||||
warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
|
||||
await purchase.market.withdrawFunds(purchase.requestId)
|
||||
|
||||
let error = newException(Timeout, "Purchase cancelled due to timeout")
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
|
||||
method description*(state: PurchaseCancelled): string =
|
||||
"cancelled"
|
||||
purchase.future.fail(error)
|
||||
|
|
|
@ -1,13 +1,23 @@
|
|||
import pkg/metrics
|
||||
import ../statemachine
|
||||
import ../../utils/exceptions
|
||||
import ../../logutils
|
||||
|
||||
declareCounter(codex_purchases_error, "codex purchases error")
|
||||
|
||||
logScope:
|
||||
topics = "marketplace purchases errored"
|
||||
|
||||
type PurchaseErrored* = ref object of PurchaseState
|
||||
error*: ref CatchableError
|
||||
|
||||
method enter*(state: PurchaseErrored) =
|
||||
without purchase =? (state.context as Purchase):
|
||||
raiseAssert "invalid state"
|
||||
method `$`*(state: PurchaseErrored): string =
|
||||
"errored"
|
||||
|
||||
method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_error.inc()
|
||||
let purchase = Purchase(machine)
|
||||
|
||||
error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId
|
||||
|
||||
purchase.future.fail(state.error)
|
||||
|
||||
method description*(state: PurchaseErrored): string =
|
||||
"errored"
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
import pkg/questionable
|
||||
import ../statemachine
|
||||
import ./error
|
||||
|
||||
type
|
||||
ErrorHandlingState* = ref object of PurchaseState
|
||||
|
||||
method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
|
||||
some State(PurchaseErrored(error: error))
|
|
@ -1,12 +1,16 @@
|
|||
import pkg/metrics
|
||||
import ../statemachine
|
||||
import ./error
|
||||
|
||||
declareCounter(codex_purchases_failed, "codex purchases failed")
|
||||
|
||||
type
|
||||
PurchaseFailed* = ref object of PurchaseState
|
||||
|
||||
method enter*(state: PurchaseFailed) =
|
||||
let error = newException(PurchaseError, "Purchase failed")
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
|
||||
method description*(state: PurchaseFailed): string =
|
||||
method `$`*(state: PurchaseFailed): string =
|
||||
"failed"
|
||||
|
||||
method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_failed.inc()
|
||||
let error = newException(PurchaseError, "Purchase failed")
|
||||
return some State(PurchaseErrored(error: error))
|
||||
|
|
|
@ -1,12 +1,20 @@
|
|||
import pkg/metrics
|
||||
|
||||
import ../statemachine
|
||||
import ../../logutils
|
||||
|
||||
declareCounter(codex_purchases_finished, "codex purchases finished")
|
||||
|
||||
logScope:
|
||||
topics = "marketplace purchases finished"
|
||||
|
||||
type PurchaseFinished* = ref object of PurchaseState
|
||||
|
||||
method enter*(state: PurchaseFinished) =
|
||||
without purchase =? (state.context as Purchase):
|
||||
raiseAssert "invalid state"
|
||||
|
||||
purchase.future.complete()
|
||||
|
||||
method description*(state: PurchaseFinished): string =
|
||||
method `$`*(state: PurchaseFinished): string =
|
||||
"finished"
|
||||
|
||||
method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_finished.inc()
|
||||
let purchase = Purchase(machine)
|
||||
info "Purchase finished", requestId = purchase.requestId
|
||||
purchase.future.complete()
|
||||
|
|
|
@ -1,21 +1,18 @@
|
|||
import pkg/metrics
|
||||
import ../statemachine
|
||||
import ./errorhandling
|
||||
import ./submitted
|
||||
import ./error
|
||||
|
||||
type PurchasePending* = ref object of PurchaseState
|
||||
declareCounter(codex_purchases_pending, "codex purchases pending")
|
||||
|
||||
method enterAsync(state: PurchasePending) {.async.} =
|
||||
without purchase =? (state.context as Purchase) and
|
||||
request =? purchase.request:
|
||||
raiseAssert "invalid state"
|
||||
type PurchasePending* = ref object of ErrorHandlingState
|
||||
|
||||
try:
|
||||
await purchase.market.requestStorage(request)
|
||||
except CatchableError as error:
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
return
|
||||
|
||||
state.switch(PurchaseSubmitted())
|
||||
|
||||
method description*(state: PurchasePending): string =
|
||||
method `$`*(state: PurchasePending): string =
|
||||
"pending"
|
||||
|
||||
method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_pending.inc()
|
||||
let purchase = Purchase(machine)
|
||||
let request = !purchase.request
|
||||
await purchase.market.requestStorage(request)
|
||||
return some State(PurchaseSubmitted())
|
||||
|
|
|
@ -1,32 +1,41 @@
|
|||
import pkg/metrics
|
||||
|
||||
import ../../logutils
|
||||
import ../statemachine
|
||||
import ./error
|
||||
import ./errorhandling
|
||||
import ./finished
|
||||
import ./failed
|
||||
|
||||
type PurchaseStarted* = ref object of PurchaseState
|
||||
declareCounter(codex_purchases_started, "codex purchases started")
|
||||
|
||||
method enterAsync*(state: PurchaseStarted) {.async.} =
|
||||
without purchase =? (state.context as Purchase):
|
||||
raiseAssert "invalid state"
|
||||
logScope:
|
||||
topics = "marketplace purchases started"
|
||||
|
||||
type PurchaseStarted* = ref object of ErrorHandlingState
|
||||
|
||||
method `$`*(state: PurchaseStarted): string =
|
||||
"started"
|
||||
|
||||
method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_started.inc()
|
||||
let purchase = Purchase(machine)
|
||||
|
||||
let clock = purchase.clock
|
||||
let market = purchase.market
|
||||
info "All required slots filled, purchase started", requestId = purchase.requestId
|
||||
|
||||
let failed = newFuture[void]()
|
||||
proc callback(_: RequestId) =
|
||||
failed.complete()
|
||||
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
|
||||
|
||||
let ended = clock.waitUntil(await market.getRequestEnd(purchase.requestId))
|
||||
try:
|
||||
let fut = await one(ended, failed)
|
||||
if fut.id == failed.id:
|
||||
state.switch(PurchaseFailed())
|
||||
else:
|
||||
state.switch(PurchaseFinished())
|
||||
await subscription.unsubscribe()
|
||||
except CatchableError as error:
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
|
||||
method description*(state: PurchaseStarted): string =
|
||||
"started"
|
||||
# Ensure that we're past the request end by waiting an additional second
|
||||
let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
|
||||
let fut = await one(ended, failed)
|
||||
await subscription.unsubscribe()
|
||||
if fut.id == failed.id:
|
||||
ended.cancel()
|
||||
return some State(PurchaseFailed())
|
||||
else:
|
||||
failed.cancel()
|
||||
return some State(PurchaseFinished())
|
||||
|
|
|
@ -1,18 +1,30 @@
|
|||
import pkg/metrics
|
||||
|
||||
import ../../logutils
|
||||
import ../statemachine
|
||||
import ./error
|
||||
import ./errorhandling
|
||||
import ./started
|
||||
import ./cancelled
|
||||
|
||||
type PurchaseSubmitted* = ref object of PurchaseState
|
||||
logScope:
|
||||
topics = "marketplace purchases submitted"
|
||||
|
||||
method enterAsync(state: PurchaseSubmitted) {.async.} =
|
||||
without purchase =? (state.context as Purchase) and
|
||||
request =? purchase.request:
|
||||
raiseAssert "invalid state"
|
||||
declareCounter(codex_purchases_submitted, "codex purchases submitted")
|
||||
|
||||
type PurchaseSubmitted* = ref object of ErrorHandlingState
|
||||
|
||||
method `$`*(state: PurchaseSubmitted): string =
|
||||
"submitted"
|
||||
|
||||
method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_submitted.inc()
|
||||
let purchase = Purchase(machine)
|
||||
let request = !purchase.request
|
||||
let market = purchase.market
|
||||
let clock = purchase.clock
|
||||
|
||||
info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId
|
||||
|
||||
proc wait {.async.} =
|
||||
let done = newFuture[void]()
|
||||
proc callback(_: RequestId) =
|
||||
|
@ -22,19 +34,13 @@ method enterAsync(state: PurchaseSubmitted) {.async.} =
|
|||
await subscription.unsubscribe()
|
||||
|
||||
proc withTimeout(future: Future[void]) {.async.} =
|
||||
let expiry = request.expiry.truncate(int64)
|
||||
let expiry = (await market.requestExpiresAt(request.id)) + 1
|
||||
trace "waiting for request fulfillment or expiry", expiry
|
||||
await future.withTimeout(clock, expiry)
|
||||
|
||||
try:
|
||||
await wait().withTimeout()
|
||||
except Timeout:
|
||||
state.switch(PurchaseCancelled())
|
||||
return
|
||||
except CatchableError as error:
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
return
|
||||
return some State(PurchaseCancelled())
|
||||
|
||||
state.switch(PurchaseStarted())
|
||||
|
||||
method description*(state: PurchaseSubmitted): string =
|
||||
"submitted"
|
||||
return some State(PurchaseStarted())
|
||||
|
|
|
@ -1,37 +1,35 @@
|
|||
import pkg/metrics
|
||||
import ../statemachine
|
||||
import ./errorhandling
|
||||
import ./submitted
|
||||
import ./started
|
||||
import ./cancelled
|
||||
import ./finished
|
||||
import ./failed
|
||||
import ./error
|
||||
|
||||
type PurchaseUnknown* = ref object of PurchaseState
|
||||
declareCounter(codex_purchases_unknown, "codex purchases unknown")
|
||||
|
||||
method enterAsync(state: PurchaseUnknown) {.async.} =
|
||||
without purchase =? (state.context as Purchase):
|
||||
raiseAssert "invalid state"
|
||||
type PurchaseUnknown* = ref object of ErrorHandlingState
|
||||
|
||||
try:
|
||||
if (request =? await purchase.market.getRequest(purchase.requestId)) and
|
||||
(requestState =? await purchase.market.getState(purchase.requestId)):
|
||||
|
||||
purchase.request = some request
|
||||
|
||||
case requestState
|
||||
of RequestState.New:
|
||||
state.switch(PurchaseSubmitted())
|
||||
of RequestState.Started:
|
||||
state.switch(PurchaseStarted())
|
||||
of RequestState.Cancelled:
|
||||
state.switch(PurchaseCancelled())
|
||||
of RequestState.Finished:
|
||||
state.switch(PurchaseFinished())
|
||||
of RequestState.Failed:
|
||||
state.switch(PurchaseFailed())
|
||||
|
||||
except CatchableError as error:
|
||||
state.switch(PurchaseErrored(error: error))
|
||||
|
||||
method description*(state: PurchaseUnknown): string =
|
||||
method `$`*(state: PurchaseUnknown): string =
|
||||
"unknown"
|
||||
|
||||
method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} =
|
||||
codex_purchases_unknown.inc()
|
||||
let purchase = Purchase(machine)
|
||||
if (request =? await purchase.market.getRequest(purchase.requestId)) and
|
||||
(requestState =? await purchase.market.requestState(purchase.requestId)):
|
||||
|
||||
purchase.request = some request
|
||||
|
||||
case requestState
|
||||
of RequestState.New:
|
||||
return some State(PurchaseSubmitted())
|
||||
of RequestState.Started:
|
||||
return some State(PurchaseStarted())
|
||||
of RequestState.Cancelled:
|
||||
return some State(PurchaseCancelled())
|
||||
of RequestState.Finished:
|
||||
return some State(PurchaseFinished())
|
||||
of RequestState.Failed:
|
||||
return some State(PurchaseFailed())
|
||||
|
|
|
@ -13,26 +13,30 @@ push: {.upraises: [].}
|
|||
|
||||
|
||||
import std/sequtils
|
||||
import std/sugar
|
||||
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/chronicles
|
||||
import pkg/chronos
|
||||
import pkg/presto
|
||||
import pkg/libp2p
|
||||
import pkg/presto except toJson
|
||||
import pkg/metrics except toJson
|
||||
import pkg/stew/base10
|
||||
import pkg/stew/byteutils
|
||||
import pkg/confutils
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/libp2p/routing_record
|
||||
import pkg/libp2pdht/discv5/spr as spr
|
||||
import pkg/codexdht/discv5/spr as spr
|
||||
|
||||
import ../logutils
|
||||
import ../node
|
||||
import ../blocktype
|
||||
import ../conf
|
||||
import ../contracts
|
||||
import ../streams
|
||||
import ../erasure/erasure
|
||||
import ../manifest
|
||||
import ../streams/asyncstreamwrapper
|
||||
import ../stores
|
||||
import ../utils/options
|
||||
|
||||
import ./coders
|
||||
import ./json
|
||||
|
@ -40,130 +44,91 @@ import ./json
|
|||
logScope:
|
||||
topics = "codex restapi"
|
||||
|
||||
declareCounter(codex_api_uploads, "codex API uploads")
|
||||
declareCounter(codex_api_downloads, "codex API downloads")
|
||||
|
||||
proc validate(
|
||||
pattern: string,
|
||||
value: string): int
|
||||
{.gcsafe, raises: [Defect].} =
|
||||
0
|
||||
|
||||
proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
|
||||
var router = RestRouter.init(validate)
|
||||
router.api(
|
||||
MethodGet,
|
||||
"/api/codex/v1/connect/{peerId}") do (
|
||||
peerId: PeerID,
|
||||
addrs: seq[MultiAddress]) -> RestApiResponse:
|
||||
## Connect to a peer
|
||||
##
|
||||
## If `addrs` param is supplied, it will be used to
|
||||
## dial the peer, otherwise the `peerId` is used
|
||||
## to invoke peer discovery, if it succeeds
|
||||
## the returned addresses will be used to dial
|
||||
##
|
||||
proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =
|
||||
var content: seq[RestContent]
|
||||
|
||||
if peerId.isErr:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
$peerId.error())
|
||||
proc formatManifest(cid: Cid, manifest: Manifest) =
|
||||
let restContent = RestContent.init(cid, manifest)
|
||||
content.add(restContent)
|
||||
|
||||
let addresses = if addrs.isOk and addrs.get().len > 0:
|
||||
addrs.get()
|
||||
else:
|
||||
without peerRecord =? (await node.findPeer(peerId.get())):
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
"Unable to find Peer!")
|
||||
peerRecord.addresses.mapIt(it.address)
|
||||
try:
|
||||
await node.connect(peerId.get(), addresses)
|
||||
return RestApiResponse.response("Successfully connected to peer")
|
||||
except DialFailedError as e:
|
||||
return RestApiResponse.error(Http400, "Unable to dial peer")
|
||||
except CatchableError as e:
|
||||
return RestApiResponse.error(Http400, "Unknown error dialling peer")
|
||||
await node.iterateManifests(formatManifest)
|
||||
return %RestContentList.init(content)
|
||||
|
||||
router.api(
|
||||
MethodGet,
|
||||
"/api/codex/v1/download/{id}") do (
|
||||
id: Cid, resp: HttpResponseRef) -> RestApiResponse:
|
||||
## Download a file from the node in a streaming
|
||||
## manner
|
||||
##
|
||||
proc retrieveCid(
|
||||
node: CodexNodeRef,
|
||||
cid: Cid,
|
||||
local: bool = true,
|
||||
resp: HttpResponseRef): Future[RestApiResponse] {.async.} =
|
||||
## Download a file from the node in a streaming
|
||||
## manner
|
||||
##
|
||||
|
||||
if id.isErr:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
$id.error())
|
||||
|
||||
var
|
||||
stream: LPStream
|
||||
|
||||
var bytes = 0
|
||||
try:
|
||||
without stream =? (await node.retrieve(id.get())), error:
|
||||
return RestApiResponse.error(Http404, error.msg)
|
||||
|
||||
resp.addHeader("Content-Type", "application/octet-stream")
|
||||
await resp.prepareChunked()
|
||||
|
||||
while not stream.atEof:
|
||||
var
|
||||
buff = newSeqUninitialized[byte](BlockSize)
|
||||
len = await stream.readOnce(addr buff[0], buff.len)
|
||||
|
||||
buff.setLen(len)
|
||||
if buff.len <= 0:
|
||||
break
|
||||
|
||||
bytes += buff.len
|
||||
trace "Sending chunk", size = buff.len
|
||||
await resp.sendChunk(addr buff[0], buff.len)
|
||||
await resp.finish()
|
||||
except CatchableError as exc:
|
||||
trace "Excepting streaming blocks", exc = exc.msg
|
||||
return RestApiResponse.error(Http500)
|
||||
finally:
|
||||
trace "Sent bytes", cid = id.get(), bytes
|
||||
if not stream.isNil:
|
||||
await stream.close()
|
||||
|
||||
router.rawApi(
|
||||
MethodPost,
|
||||
"/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
|
||||
## Create a request for storage
|
||||
##
|
||||
## cid - the cid of a previously uploaded dataset
|
||||
## duration - the duration of the contract
|
||||
## reward - the maximum price the client is willing to pay
|
||||
|
||||
without cid =? cid.tryGet.catch, error:
|
||||
return RestApiResponse.error(Http400, error.msg)
|
||||
|
||||
let body = await request.getBody()
|
||||
|
||||
without params =? StorageRequestParams.fromJson(body), error:
|
||||
return RestApiResponse.error(Http400, error.msg)
|
||||
|
||||
let nodes = params.nodes |? 1
|
||||
let tolerance = params.nodes |? 0
|
||||
|
||||
without purchaseId =? await node.requestStorage(
|
||||
cid,
|
||||
params.duration,
|
||||
nodes,
|
||||
tolerance,
|
||||
params.reward,
|
||||
params.expiry), error:
|
||||
var
|
||||
stream: LPStream
|
||||
|
||||
var bytes = 0
|
||||
try:
|
||||
without stream =? (await node.retrieve(cid, local)), error:
|
||||
if error of BlockNotFoundError:
|
||||
return RestApiResponse.error(Http404, error.msg)
|
||||
else:
|
||||
return RestApiResponse.error(Http500, error.msg)
|
||||
|
||||
return RestApiResponse.response(purchaseId.toHex)
|
||||
resp.addHeader("Content-Type", "application/octet-stream")
|
||||
await resp.prepareChunked()
|
||||
|
||||
while not stream.atEof:
|
||||
var
|
||||
buff = newSeqUninitialized[byte](DefaultBlockSize.int)
|
||||
len = await stream.readOnce(addr buff[0], buff.len)
|
||||
|
||||
buff.setLen(len)
|
||||
if buff.len <= 0:
|
||||
break
|
||||
|
||||
bytes += buff.len
|
||||
await resp.sendChunk(addr buff[0], buff.len)
|
||||
await resp.finish()
|
||||
codex_api_downloads.inc()
|
||||
except CatchableError as exc:
|
||||
warn "Excepting streaming blocks", exc = exc.msg
|
||||
return RestApiResponse.error(Http500)
|
||||
finally:
|
||||
info "Sent bytes", cid = cid, bytes
|
||||
if not stream.isNil:
|
||||
await stream.close()
|
||||
|
||||
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
|
||||
let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion
|
||||
|
||||
router.api(
|
||||
MethodOptions,
|
||||
"/api/codex/v1/data") do (
|
||||
resp: HttpResponseRef) -> RestApiResponse:
|
||||
|
||||
if corsOrigin =? allowedOrigin:
|
||||
resp.setHeader("Access-Control-Allow-Origin", corsOrigin)
|
||||
resp.setHeader("Access-Control-Allow-Methods", "POST, OPTIONS")
|
||||
resp.setHeader("Access-Control-Allow-Headers", "content-type")
|
||||
resp.setHeader("Access-Control-Max-Age", "86400")
|
||||
|
||||
resp.status = Http204
|
||||
await resp.sendBody("")
|
||||
|
||||
router.rawApi(
|
||||
MethodPost,
|
||||
"/api/codex/v1/upload") do (
|
||||
"/api/codex/v1/data") do (
|
||||
) -> RestApiResponse:
|
||||
## Upload a file in a streamming manner
|
||||
## Upload a file in a streaming manner
|
||||
##
|
||||
|
||||
trace "Handling file upload"
|
||||
|
@ -186,18 +151,487 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
|
|||
trace "Error uploading file", exc = error.msg
|
||||
return RestApiResponse.error(Http500, error.msg)
|
||||
|
||||
codex_api_uploads.inc()
|
||||
trace "Uploaded file", cid
|
||||
return RestApiResponse.response($cid)
|
||||
except CancelledError as exc:
|
||||
except CancelledError:
|
||||
trace "Upload cancelled error"
|
||||
return RestApiResponse.error(Http500)
|
||||
except AsyncStreamError:
|
||||
trace "Async stream error"
|
||||
return RestApiResponse.error(Http500)
|
||||
finally:
|
||||
await reader.closeWait()
|
||||
|
||||
# if we got here something went wrong?
|
||||
trace "Something went wrong error"
|
||||
return RestApiResponse.error(Http500)
|
||||
|
||||
router.api(
|
||||
MethodGet,
|
||||
"/api/codex/v1/data") do () -> RestApiResponse:
|
||||
let json = await formatManifestBlocks(node)
|
||||
return RestApiResponse.response($json, contentType="application/json")
|
||||
|
||||
router.api(
|
||||
MethodGet,
|
||||
"/api/codex/v1/data/{cid}") do (
|
||||
cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
|
||||
## Download a file from the local node in a streaming
|
||||
## manner
|
||||
if cid.isErr:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
$cid.error())
|
||||
|
||||
if corsOrigin =? allowedOrigin:
|
||||
resp.setHeader("Access-Control-Allow-Origin", corsOrigin)
|
||||
resp.setHeader("Access-Control-Allow-Methods", "GET, OPTIONS")
|
||||
resp.setHeader("Access-Control-Headers", "X-Requested-With")
|
||||
resp.setHeader("Access-Control-Max-Age", "86400")
|
||||
|
||||
await node.retrieveCid(cid.get(), local = true, resp=resp)
|
||||
|
||||
router.api(
|
||||
MethodGet,
|
||||
"/api/codex/v1/data/{cid}/network") do (
|
||||
cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
|
||||
## Download a file from the network in a streaming
|
||||
## manner
|
||||
##
|
||||
|
||||
if cid.isErr:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
$cid.error())
|
||||
|
||||
if corsOrigin =? allowedOrigin:
|
||||
resp.setHeader("Access-Control-Allow-Origin", corsOrigin)
|
||||
resp.setHeader("Access-Control-Allow-Methods", "GET, OPTIONS")
|
||||
resp.setHeader("Access-Control-Headers", "X-Requested-With")
|
||||
resp.setHeader("Access-Control-Max-Age", "86400")
|
||||
|
||||
await node.retrieveCid(cid.get(), local = false, resp=resp)
|
||||
|
||||
router.api(
  MethodGet,
  "/api/codex/v1/space") do () -> RestApiResponse:
    ## GET /api/codex/v1/space
    ## Report the repo store's quota counters (block count, maximum, used and
    ## reserved bytes) as a JSON document.
    let json = % RestRepoStore(
      totalBlocks: repoStore.totalBlocks,
      quotaMaxBytes: repoStore.quotaMaxBytes,
      quotaUsedBytes: repoStore.quotaUsedBytes,
      quotaReservedBytes: repoStore.quotaReservedBytes
    )
    return RestApiResponse.response($json, contentType="application/json")
|
||||
|
||||
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
  ## Register the sales (storage-provider) endpoints on `router`.
  ## Every handler answers 503 when the node has no host contracts configured.
  router.api(
    MethodGet,
    "/api/codex/v1/sales/slots") do () -> RestApiResponse:
      ## Returns active slots for the host
      try:
        without contracts =? node.contracts.host:
          return RestApiResponse.error(Http503, "Sales unavailable")

        let json = %(await contracts.sales.mySlots())
        return RestApiResponse.response($json, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.api(
    MethodGet,
    "/api/codex/v1/sales/slots/{slotId}") do (slotId: SlotId) -> RestApiResponse:
      ## Returns active slot with id {slotId} for the host. Returns 404 if the
      ## slot is not active for the host.

      without contracts =? node.contracts.host:
        return RestApiResponse.error(Http503, "Sales unavailable")

      without slotId =? slotId.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg)

      without agent =? await contracts.sales.activeSale(slotId):
        return RestApiResponse.error(Http404, "Provider not filling slot")

      let restAgent = RestSalesAgent(
        state: agent.state() |? "none",
        slotIndex: agent.data.slotIndex,
        requestId: agent.data.requestId
      )

      return RestApiResponse.response(restAgent.toJson, contentType="application/json")

  router.api(
    MethodGet,
    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
      ## Returns storage that is for sale

      try:
        without contracts =? node.contracts.host:
          return RestApiResponse.error(Http503, "Sales unavailable")

        without avails =? (await contracts.sales.context.reservations.all(Availability)), err:
          return RestApiResponse.error(Http500, err.msg)

        let json = %avails
        return RestApiResponse.response($json, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.rawApi(
    MethodPost,
    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
      ## Add available storage to sell.
      ## Every time Availability's offer finishes, its capacity is returned to the availability.
      ##
      ## totalSize - size of available storage in bytes
      ## duration - maximum time the storage should be sold for (in seconds)
      ## minPrice - minimum price to be paid (in amount of tokens)
      ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens)

      try:
        without contracts =? node.contracts.host:
          return RestApiResponse.error(Http503, "Sales unavailable")

        let body = await request.getBody()

        without restAv =? RestAvailability.fromJson(body), error:
          return RestApiResponse.error(Http400, error.msg)

        let reservations = contracts.sales.context.reservations

        if restAv.totalSize == 0:
          # grammar fix in user-facing message: "then" -> "than"
          return RestApiResponse.error(Http400, "Total size must be larger than zero")

        if not reservations.hasAvailable(restAv.totalSize.truncate(uint)):
          return RestApiResponse.error(Http422, "Not enough storage quota")

        without availability =? (
          await reservations.createAvailability(
            restAv.totalSize,
            restAv.duration,
            restAv.minPrice,
            restAv.maxCollateral)
        ), error:
          return RestApiResponse.error(Http500, error.msg)

        return RestApiResponse.response(availability.toJson,
                                        Http201,
                                        contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.rawApi(
    MethodPatch,
    "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId) -> RestApiResponse:
      ## Updates Availability.
      ## The new parameters will be only considered for new requests.
      ## Existing Requests linked to this Availability will continue as is.
      ##
      ## totalSize - size of available storage in bytes. When decreasing the size, then lower limit is the currently `totalSize - freeSize`.
      ## duration - maximum time the storage should be sold for (in seconds)
      ## minPrice - minimum price to be paid (in amount of tokens)
      ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens)

      try:
        without contracts =? node.contracts.host:
          return RestApiResponse.error(Http503, "Sales unavailable")

        without id =? id.tryGet.catch, error:
          return RestApiResponse.error(Http400, error.msg)
        without keyId =? id.key.tryGet.catch, error:
          return RestApiResponse.error(Http400, error.msg)

        let
          body = await request.getBody()
          reservations = contracts.sales.context.reservations

        # Optionalize makes every RestAvailability field optional so clients
        # may PATCH any subset of fields.
        type OptRestAvailability = Optionalize(RestAvailability)
        without restAv =? OptRestAvailability.fromJson(body), error:
          return RestApiResponse.error(Http400, error.msg)

        without availability =? (await reservations.get(keyId, Availability)), error:
          if error of NotExistsError:
            return RestApiResponse.error(Http404, "Availability not found")

          return RestApiResponse.error(Http500, error.msg)

        if isSome restAv.freeSize:
          return RestApiResponse.error(Http400, "Updating freeSize is not allowed")

        if size =? restAv.totalSize:
          # we don't allow lowering the totalSize below currently utilized size
          if size < (availability.totalSize - availability.freeSize):
            # grammar fix in user-facing message: "then" -> "than"
            return RestApiResponse.error(Http400, "New totalSize must be larger than current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize))

          # grow/shrink freeSize by the same delta applied to totalSize
          availability.freeSize += size - availability.totalSize
          availability.totalSize = size

        if duration =? restAv.duration:
          availability.duration = duration

        if minPrice =? restAv.minPrice:
          availability.minPrice = minPrice

        if maxCollateral =? restAv.maxCollateral:
          availability.maxCollateral = maxCollateral

        if err =? (await reservations.update(availability)).errorOption:
          return RestApiResponse.error(Http500, err.msg)

        return RestApiResponse.response(Http200)
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.rawApi(
    MethodGet,
    "/api/codex/v1/sales/availability/{id}/reservations") do (id: AvailabilityId) -> RestApiResponse:
      ## Gets Availability's reservations.

      try:
        without contracts =? node.contracts.host:
          return RestApiResponse.error(Http503, "Sales unavailable")

        without id =? id.tryGet.catch, error:
          return RestApiResponse.error(Http400, error.msg)
        without keyId =? id.key.tryGet.catch, error:
          return RestApiResponse.error(Http400, error.msg)

        let reservations = contracts.sales.context.reservations

        # confirm the availability exists before listing its reservations
        if error =? (await reservations.get(keyId, Availability)).errorOption:
          if error of NotExistsError:
            return RestApiResponse.error(Http404, "Availability not found")
          else:
            return RestApiResponse.error(Http500, error.msg)

        without availabilitysReservations =? (await reservations.all(Reservation, id)), err:
          return RestApiResponse.error(Http500, err.msg)

        # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
        return RestApiResponse.response(availabilitysReservations.toJson, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)
|
||||
|
||||
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
  ## Register the purchasing (storage-client) endpoints on `router`.
  ## Every handler answers 503 when the node has no client contracts configured.
  router.rawApi(
    MethodPost,
    "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
      ## Create a request for storage
      ##
      ## cid - the cid of a previously uploaded dataset
      ## duration - the duration of the request in seconds
      ## proofProbability - how often storage proofs are required
      ## reward - the maximum amount of tokens paid per second per slot to hosts the client is willing to pay
      ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
      ## nodes - number of nodes the content should be stored on
      ## tolerance - allowed number of nodes that can be lost before content is lost
      ## collateral - requested collateral from hosts when they fill slot

      try:
        without contracts =? node.contracts.client:
          return RestApiResponse.error(Http503, "Purchasing unavailable")

        without cid =? cid.tryGet.catch, error:
          return RestApiResponse.error(Http400, error.msg)

        let body = await request.getBody()

        without params =? StorageRequestParams.fromJson(body), error:
          return RestApiResponse.error(Http400, error.msg)

        let nodes = params.nodes |? 1
        let tolerance = params.tolerance |? 0

        # prevent underflow
        if tolerance > nodes:
          return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")

        let ecK = nodes - tolerance
        let ecM = tolerance # for readability

        # ensure leopard constraints of 1 < K ≥ M
        if ecK <= 1 or ecK < ecM:
          # spelling fix in user-facing message: "satify" -> "satisfy"
          return RestApiResponse.error(Http400, "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`")

        without expiry =? params.expiry:
          return RestApiResponse.error(Http400, "Expiry required")

        if expiry <= 0 or expiry >= params.duration:
          # grammar fix in user-facing message: "then" -> "than"
          return RestApiResponse.error(Http400, "Expiry needs value bigger than zero and smaller than the request's duration")

        without purchaseId =? await node.requestStorage(
          cid,
          params.duration,
          params.proofProbability,
          nodes,
          tolerance,
          params.reward,
          params.collateral,
          expiry), error:

          if error of InsufficientBlocksError:
            return RestApiResponse.error(Http400,
              "Dataset too small for erasure parameters, need at least " &
                $(ref InsufficientBlocksError)(error).minSize.int & " bytes")

          return RestApiResponse.error(Http500, error.msg)

        return RestApiResponse.response(purchaseId.toHex)
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.api(
    MethodGet,
    "/api/codex/v1/storage/purchases/{id}") do (
      id: PurchaseId) -> RestApiResponse:
      ## Return the state of a single purchase; 404 when the id is unknown.

      try:
        without contracts =? node.contracts.client:
          return RestApiResponse.error(Http503, "Purchasing unavailable")

        without id =? id.tryGet.catch, error:
          return RestApiResponse.error(Http400, error.msg)

        without purchase =? contracts.purchasing.getPurchase(id):
          return RestApiResponse.error(Http404)

        let json = % RestPurchase(
          state: purchase.state |? "none",
          error: purchase.error.?msg,
          request: purchase.request,
          requestId: purchase.requestId
        )

        return RestApiResponse.response($json, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.api(
    MethodGet,
    "/api/codex/v1/storage/purchases") do () -> RestApiResponse:
      ## List the ids of all known purchases.
      try:
        without contracts =? node.contracts.client:
          return RestApiResponse.error(Http503, "Purchasing unavailable")

        let purchaseIds = contracts.purchasing.getPurchaseIds()
        return RestApiResponse.response($ %purchaseIds, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)
|
||||
|
||||
proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
  ## various node management api's
  ##
  router.api(
    MethodGet,
    "/api/codex/v1/spr") do () -> RestApiResponse:
      ## Returns node SPR in requested format, json or text.
      ##
      try:
        without spr =? node.discovery.dhtRecord:
          # No signed peer record available yet -> 503 with an empty body.
          return RestApiResponse.response("", status=Http503, contentType="application/json")

        # Content negotiation: plain text returns the bare SPR URI,
        # anything else gets a JSON envelope.
        if $preferredContentType().get() == "text/plain":
          return RestApiResponse.response(spr.toURI, contentType="text/plain")
        else:
          return RestApiResponse.response($ %* {"spr": spr.toURI}, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.api(
    MethodGet,
    "/api/codex/v1/peerid") do () -> RestApiResponse:
      ## Returns node's peerId in requested format, json or text.
      ##
      try:
        let id = $node.switch.peerInfo.peerId

        if $preferredContentType().get() == "text/plain":
          return RestApiResponse.response(id, contentType="text/plain")
        else:
          return RestApiResponse.response($ %* {"id": id}, contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)

  router.api(
    MethodGet,
    "/api/codex/v1/connect/{peerId}") do (
      peerId: PeerId,
      addrs: seq[MultiAddress]) -> RestApiResponse:
      ## Connect to a peer
      ##
      ## If `addrs` param is supplied, it will be used to
      ## dial the peer, otherwise the `peerId` is used
      ## to invoke peer discovery, if it succeeds
      ## the returned addresses will be used to dial
      ##
      ## `addrs` the listening addresses of the peers to dial, eg the one specified with `--listen-addrs`
      ##

      if peerId.isErr:
        return RestApiResponse.error(
          Http400,
          $peerId.error())

      # Prefer caller-supplied addresses; fall back to DHT peer discovery.
      let addresses = if addrs.isOk and addrs.get().len > 0:
            addrs.get()
          else:
            without peerRecord =? (await node.findPeer(peerId.get())):
              return RestApiResponse.error(
                Http400,
                "Unable to find Peer!")
            peerRecord.addresses.mapIt(it.address)
      try:
        await node.connect(peerId.get(), addresses)
        return RestApiResponse.response("Successfully connected to peer")
      except DialFailedError:
        return RestApiResponse.error(Http400, "Unable to dial peer")
      except CatchableError:
        return RestApiResponse.error(Http500, "Unknown error dialling peer")
|
||||
|
||||
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
  ## Register debug/introspection endpoints on `router`.
  router.api(
    MethodGet,
    "/api/codex/v1/debug/info") do () -> RestApiResponse:
      ## Print rudimentary node information
      ##
      try:
        let table = RestRoutingTable.init(node.discovery.protocol.routingTable)

        let
          json = %*{
            "id": $node.switch.peerInfo.peerId,
            "addrs": node.switch.peerInfo.addrs.mapIt( $it ),
            "repo": $conf.dataDir,
            "spr":
              # empty string when no DHT record has been published yet
              if node.discovery.dhtRecord.isSome:
                node.discovery.dhtRecord.get.toURI
              else:
                "",
            "announceAddresses": node.discovery.announceAddrs,
            "table": table,
            "codex": {
              "version": $codexVersion,
              "revision": $codexRevision
            }
          }

        # return pretty json for human readability
        return RestApiResponse.response(json.pretty(), contentType="application/json")
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)
|
||||
|
||||
router.api(
  MethodPost,
  "/api/codex/v1/debug/chronicles/loglevel") do (
    level: Option[string]) -> RestApiResponse:
    ## Set the chronicles log level at runtime.
    ##
    ## `level` - chronicles log level
    ##
    # NOTE(review): this span was corrupted by diff interleaving — the
    # `without res =? level` guard appeared twice and the
    # `{.gcsafe.}: updateLogLevel(level)` call was spliced into the
    # debug/peer handler. Reconstructed to the coherent new-side version;
    # the `do (level: Option[string])` parameter line was not visible and
    # should be confirmed against the repository.
    try:
      without res =? level and level =? res:
        return RestApiResponse.error(Http400, "Missing log level")

      try:
        {.gcsafe.}:
          updateLogLevel(level)
      except CatchableError as exc:
        return RestApiResponse.error(Http500, exc.msg)

      return RestApiResponse.response("")
    except CatchableError as exc:
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500)

when codex_enable_api_debug_peers:
  router.api(
    MethodGet,
    "/api/codex/v1/debug/peer/{peerId}") do (peerId: PeerId) -> RestApiResponse:
      ## Look up a peer record via discovery and return it as JSON.
      try:
        trace "debug/peer start"
        without peerRecord =? (await node.findPeer(peerId.get())):
          trace "debug/peer peer not found!"
          return RestApiResponse.error(
            Http400,
            "Unable to find Peer!")

        let json = %RestPeerRecord.init(peerRecord)
        trace "debug/peer returning peer record"
        return RestApiResponse.response($json)
      except CatchableError as exc:
        trace "Excepting processing request", exc = exc.msg
        return RestApiResponse.error(Http500)
|
||||
proc initRestApi*(
    node: CodexNodeRef,
    conf: CodexConf,
    repoStore: RepoStore,
    corsAllowedOrigin: ?string): RestRouter =
  ## Build the Codex REST router and register all endpoint groups.
  ##
  ## NOTE(review): this span of the rendered diff interleaved the new
  ## signature with the *removed* pre-refactor inline handlers (old
  ## debug/info, sales/availability and storage/purchases bodies). Only the
  ## new composition below belongs in the file; the deleted handlers were
  ## dropped.
  var router = RestRouter.init(validate, corsAllowedOrigin)

  initDataApi(node, repoStore, router)
  initSalesApi(node, router)
  initPurchasingApi(node, router)
  initNodeApi(node, conf, router)
  initDebugApi(node, conf, router)

  return router
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue