Merge branch 'master' into batch-utils

commit f351a44d8b
@@ -0,0 +1,6 @@
.github
build
docs
metrics
nimcache
tests
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
@@ -9,17 +9,26 @@ inputs:
   cpu:
     description: "CPU to build for"
     default: "amd64"
-  nim_branch:
+  nim_version:
     description: "Nim version"
     default: "version-1-6"
+  rust_version:
+    description: "Rust version"
+    default: "1.78.0"
   shell:
     description: "Shell to run commands in"
     default: "bash --noprofile --norc -e -o pipefail"
 runs:
   using: "composite"
   steps:
-    - name: APT (Linux amd64)
-      if: inputs.os == 'linux' && inputs.cpu == 'amd64'
+    - name: Rust (Linux)
+      if: inputs.os == 'linux'
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${{ inputs.rust_version }} -y
+
+    - name: APT (Linux amd64/arm64)
+      if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
       shell: ${{ inputs.shell }} {0}
       run: |
         sudo apt-fast update -qq
@@ -45,6 +54,7 @@ runs:
       if: inputs.os == 'windows' && inputs.cpu == 'amd64'
       uses: msys2/setup-msys2@v2
       with:
+        path-type: inherit
         msystem: UCRT64
         install: >
           base-devel
@@ -52,11 +62,13 @@ runs:
           mingw-w64-ucrt-x86_64-toolchain
           mingw-w64-ucrt-x86_64-cmake
           mingw-w64-ucrt-x86_64-ntldd-git
+          mingw-w64-ucrt-x86_64-rust

     - name: MSYS2 (Windows i386)
       if: inputs.os == 'windows' && inputs.cpu == 'i386'
       uses: msys2/setup-msys2@v2
       with:
+        path-type: inherit
         msystem: MINGW32
         install: >
           base-devel
@@ -64,6 +76,13 @@ runs:
           mingw-w64-i686-toolchain
           mingw-w64-i686-cmake
           mingw-w64-i686-ntldd-git
+          mingw-w64-i686-rust
+
+    - name: MSYS2 (Windows All) - Downgrade to gcc 13
+      if: inputs.os == 'windows'
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst

     - name: Derive environment variables
       shell: ${{ inputs.shell }} {0}
@@ -73,15 +92,10 @@ runs:
           printf "'%s'" "$quoted"
         }

-        if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
-          PLATFORM=x64
-        else
-          PLATFORM=x86
-        fi
-        echo "PLATFORM=${PLATFORM}" >> ${GITHUB_ENV}
+        [[ '${{ inputs.cpu }}' == 'i386' ]] && echo "ARCH_OVERRIDE=ARCH_OVERRIDE=x86" >> ${GITHUB_ENV}

-        # Stack usage on Linux amd64
-        if [[ '${{ inputs.os }}' == 'linux' && '${{ inputs.cpu }}' == 'amd64' ]]; then
+        # Stack usage on Linux amd64/arm64
+        if [[ '${{ inputs.os }}' == 'linux' && ('${{ inputs.cpu }}' == 'amd64' || '${{ inputs.cpu }}' == 'arm64') ]]; then
           NIMFLAGS="${NIMFLAGS} -d:limitStackUsage"
           echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV
         fi
@@ -135,35 +149,28 @@ runs:
         # Use all available CPUs for build process
         ncpu=""
         case '${{ inputs.os }}' in
-          'linux')
-            ncpu=$(nproc)
-            ;;
-          'macos')
-            ncpu=$(sysctl -n hw.ncpu)
-            ;;
-          'windows')
-            ncpu=${NUMBER_OF_PROCESSORS}
-            ;;
+          'linux') ncpu=$(nproc) ;;
+          'macos') ncpu=$(sysctl -n hw.ncpu) ;;
+          'windows') ncpu=${NUMBER_OF_PROCESSORS} ;;
         esac
         [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
         echo "ncpu=${ncpu}" >> ${GITHUB_ENV}

     - name: Restore Nim toolchain binaries from cache
       id: nim-cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: NimBinaries
-        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
-        restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
+        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
+        restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}

     - name: Set NIM_COMMIT
       shell: ${{ inputs.shell }} {0}
-      run: echo "NIM_COMMIT=${{ inputs.nim_branch }}" >> ${GITHUB_ENV}
+      run: echo "NIM_COMMIT=${{ inputs.nim_version }}" >> ${GITHUB_ENV}

     - name: Build Nim and Codex dependencies
       shell: ${{ inputs.shell }} {0}
       run: |
-        make -j${ncpu} CI_CACHE=NimBinaries ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1 update
+        make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update
         echo
         ./env.sh nim --version
@@ -0,0 +1,77 @@
Tips for shorter build times
----------------------------

### Runner availability ###

Currently, the biggest bottleneck when optimizing workflows is the availability
of Windows and macOS runners. Therefore, anything that reduces the time spent in
Windows or macOS jobs will have a positive impact on the time waiting for
runners to become available. The usage limits for GitHub Actions are [described
here][limits]. You can see a breakdown of runner usage for your jobs in the
GitHub Actions tab ([example][usage]).

### Windows is slow ###

Performing git operations and compilation are both slow on Windows. This can
easily mean that a Windows job takes twice as long as a Linux job. Therefore it
makes sense to use a Windows runner only for testing Windows compatibility, and
nothing else. Testing compatibility with other versions of Nim, code coverage
analysis, etc. are therefore better performed on a Linux runner.

### Parallelization ###

Breaking up a long build job into several jobs that you run in parallel can have
a positive impact on the wall clock time that a workflow runs. For instance, you
might consider running unit tests and integration tests in parallel. Keep in
mind however that availability of macOS and Windows runners is the biggest
bottleneck. If you split a Windows job into two jobs, you now need to wait for
two Windows runners to become available! Therefore parallelization often only
makes sense for Linux jobs.

### Refactoring ###

As with any code, complex workflows are hard to read and change. You can use
[composite actions][composite] and [reusable workflows][reusable] to refactor
complex workflows.

### Steps for measuring time ###

Breaking up steps allows you to see the time spent in each part. For instance,
instead of having one step where all tests are performed, you might consider
having separate steps for e.g. unit tests and integration tests, so that you can
see how much time is spent in each.

### Fix slow tests ###

Try to avoid slow unit tests. They not only slow down continuous integration,
but also local development. If you encounter slow tests you can consider
reworking them to stub out the slow parts that are not under test, or use
smaller data structures for the test.

You can use [unittest2][unittest2] together with the environment variable
`NIMTEST_TIMING=true` to show how much time is spent in every test
([reference][testtime]).
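A minimal sketch of one way to use this (assuming, as with other nimbus-build-system projects, that environment variables propagate through `make` to the test runner; `test` is this repository's unit test target):

```shell
# Print per-test timing while running the unit tests
NIMTEST_TIMING=true make test
```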

### Caching ###

Ensure that caches are updated over time. For instance if you cache the latest
version of the Nim compiler, then you want to update the cache when a new
version of the compiler is released. See also the documentation
for the [cache action][cache].
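As a concrete illustration, the CI workflows in this repository bust stale caches by embedding a nonce and the Nim version in the cache key; a sketch of that key scheme in shell (the values here are illustrative):

```shell
# Key scheme: <os>-<cpu>-nim-<nim_version>-cache-<cache_nonce>-<run_id>
os=linux; cpu=amd64; nim_version=pinned; cache_nonce=0; run_id=12345
echo "${os}-${cpu}-nim-${nim_version}-cache-${cache_nonce}-${run_id}"
# Bumping cache_nonce (or changing nim_version) produces a new key,
# which forces the next run to rebuild and repopulate the cache.
```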

### Fail fast ###

By default a workflow fails fast: if one job fails, the rest are cancelled. This
might seem inconvenient, because when you're debugging an issue you often want
to know whether you introduced a failure on all platforms, or only on a single
one. You might be tempted to disable fail-fast, but keep in mind that this keeps
runners busy for longer on a workflow that you know is going to fail anyway.
Subsequent runs will therefore take longer to start. Fail fast is most likely
better for overall development speed.

[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache
[unittest2]: https://github.com/status-im/nim-unittest2
[testtime]: https://github.com/status-im/nim-unittest2/pull/12
[limits]: https://docs.github.com/en/actions/learn-github-actions/usage-limits-billing-and-administration#usage-limits
@@ -0,0 +1,88 @@
name: Reusable - CI

on:
  workflow_call:
    inputs:
      matrix:
        type: string
      cache_nonce:
        default: '0'
        description: Allows for easily busting actions/cache caches
        required: false
        type: string

env:
  cache_nonce: ${{ inputs.cache_nonce }}

jobs:
  build:
    strategy:
      matrix:
        include: ${{ fromJson(inputs.matrix) }}

    defaults:
      run:
        shell: ${{ matrix.shell }} {0}

    name: '${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.tests }}'
    runs-on: ${{ matrix.builder }}
    timeout-minutes: 90
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4
        with:
          submodules: recursive
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Setup Nimbus Build System
        uses: ./.github/actions/nimbus-build-system
        with:
          os: ${{ matrix.os }}
          shell: ${{ matrix.shell }}
          nim_version: ${{ matrix.nim_version }}

      ## Part 1 Tests ##
      - name: Unit tests
        if: matrix.tests == 'unittest' || matrix.tests == 'all'
        run: make -j${ncpu} test

      # workaround for https://github.com/NomicFoundation/hardhat/issues/3877
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 18.15

      - name: Start Ethereum node with Codex contracts
        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'all'
        working-directory: vendor/codex-contracts-eth
        env:
          MSYS2_PATH_TYPE: inherit
        run: |
          npm install
          npm start &

      ## Part 2 Tests ##
      - name: Contract tests
        if: matrix.tests == 'contract' || matrix.tests == 'all'
        run: make -j${ncpu} testContracts

      ## Part 3 Tests ##
      - name: Integration tests
        if: matrix.tests == 'integration' || matrix.tests == 'all'
        run: make -j${ncpu} testIntegration

      - name: Upload integration tests log files
        uses: actions/upload-artifact@v4
        if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
        with:
          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
          path: tests/integration/logs/
          retention-days: 1

  status:
    if: always()
    needs: [build]
    runs-on: ubuntu-latest
    steps:
      - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }}
        run: exit 1
@@ -1,106 +1,74 @@
 name: CI

 on:
   push:
     branches:
-      - main
+      - master
   pull_request:
   workflow_dispatch:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
+  nim_version: pinned
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
+  cancel-in-progress: true

 jobs:
-  build:
-    strategy:
-      matrix:
-        os: [linux, macos, windows]
-        include:
-          - os: linux
-            builder: ubuntu-latest
-            shell: bash --noprofile --norc -e -o pipefail
-          - os: macos
-            builder: macos-latest
-            shell: bash --noprofile --norc -e -o pipefail
-          - os: windows
-            builder: windows-latest
-            shell: msys2
-
-    defaults:
-      run:
-        shell: ${{ matrix.shell }} {0}
-
-    name: '${{ matrix.os }}'
-    runs-on: ${{ matrix.builder }}
-    timeout-minutes: 80
+  matrix:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.matrix.outputs.matrix }}
+      cache_nonce: ${{ env.cache_nonce }}
     steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          submodules: recursive
-
-      - name: Setup Nimbus Build System
-        uses: ./.github/actions/nimbus-build-system
-        with:
-          os: ${{ matrix.os }}
-          shell: ${{ matrix.shell }}
-
-      - name: Unit tests
-        run: make -j${ncpu} test
-
-      - name: Start Ethereum node with Codex contracts
-        working-directory: vendor/dagger-contracts
-        run: |
-          if [[ '${{ matrix.os }}' == 'windows' ]]; then
-            export PATH="${PATH}:/c/program files/nodejs"
-          fi
-          npm install
-          npm start &
-
-      - name: Contract tests
-        run: make -j${ncpu} testContracts
-
-      - name: Integration tests
-        run: make -j${ncpu} testIntegration
+      - name: Compute matrix
+        id: matrix
+        uses: fabiocaccamo/create-matrix-action@v4
+        with:
+          matrix: |
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {amd64}, builder {macos-13}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
+            os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
+
+  build:
+    needs: matrix
+    uses: ./.github/workflows/ci-reusable.yml
+    with:
+      matrix: ${{ needs.matrix.outputs.matrix }}
+      cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}

   coverage:
-    continue-on-error: true
     runs-on: ubuntu-latest
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           submodules: recursive
+          ref: ${{ github.event.pull_request.head.sha }}

       - name: Setup Nimbus Build System
         uses: ./.github/actions/nimbus-build-system
         with:
           os: linux
+          nim_version: ${{ env.nim_version }}

       - name: Generate coverage data
-        run: make -j${ncpu} coverage
+        run: |
+          # make -j${ncpu} coverage
+          make -j${ncpu} coverage-script
         shell: bash

       - name: Upload coverage data to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           directory: ./coverage/
           fail_ci_if_error: true
           files: ./coverage/coverage.f.info
           flags: unittests
           name: codecov-umbrella
+          token: ${{ secrets.CODECOV_TOKEN }}
           verbose: true
-
-  nim_1_2:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          submodules: recursive
-
-      - name: Setup Nimbus Build System
-        uses: ./.github/actions/nimbus-build-system
-        with:
-          os: linux
-          nim_branch: version-1-2
-
-      - name: Unit tests
-        run: make -j${ncpu} test
@@ -0,0 +1,33 @@
name: Docker - Dist-Tests

on:
  push:
    branches:
      - master
    tags:
      - 'v*.*.*'
    paths-ignore:
      - '**/*.md'
      - '.gitignore'
      - '.github/**'
      - '!.github/workflows/docker-dist-tests.yml'
      - '!.github/workflows/docker-reusable.yml'
      - 'docker/**'
      - '!docker/codex.Dockerfile'
      - '!docker/docker-entrypoint.sh'
  workflow_dispatch:

jobs:
  build-and-push:
    name: Build and Push
    uses: ./.github/workflows/docker-reusable.yml
    with:
      nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true'
      nat_ip_auto: true
      tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
      tag_suffix: dist-tests
      continuous_tests_list: PeersTest HoldMyBeerTest
      continuous_tests_duration: 12h
    secrets: inherit
@@ -0,0 +1,267 @@
name: Reusable - Docker

on:
  workflow_call:
    inputs:
      docker_file:
        default: docker/codex.Dockerfile
        description: Dockerfile
        required: false
        type: string
      docker_repo:
        default: codexstorage/nim-codex
        description: DockerHub repository
        required: false
        type: string
      make_parallel:
        default: 4
        description: Make parallel
        required: false
        type: number
      nimflags:
        default: '-d:disableMarchNative'
        description: Nim flags for builds
        required: false
        type: string
      nat_ip_auto:
        default: false
        description: Enable NAT IP auto
        required: false
        type: boolean
      tag_latest:
        default: true
        description: Set latest tag for Docker images
        required: false
        type: boolean
      tag_sha:
        default: true
        description: Set Git short commit as Docker tag
        required: false
        type: boolean
      tag_suffix:
        default: ''
        description: Suffix for Docker images tag
        required: false
        type: string
      continuous_tests_list:
        default: ''
        description: Continuous Tests list
        required: false
        type: string
      continuous_tests_duration:
        default: 48h
        description: Continuous Tests duration
        required: false
        type: string

env:
  # Build
  DOCKER_FILE: ${{ inputs.docker_file }}
  DOCKER_REPO: ${{ inputs.docker_repo }}
  MAKE_PARALLEL: ${{ inputs.make_parallel }}
  NIMFLAGS: ${{ inputs.nimflags }}
  NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
  TAG_LATEST: ${{ inputs.tag_latest }}
  TAG_SHA: ${{ inputs.tag_sha }}
  TAG_SUFFIX: ${{ inputs.tag_suffix }}
  # Tests
  CONTINUOUS_TESTS_SOURCE: codex-storage/cs-codex-dist-tests
  CONTINUOUS_TESTS_BRANCH: master
  CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
  CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
  CONTINUOUS_TESTS_NAMEPREFIX: c-tests-ci

jobs:
  # Build platform specific image
  build:
    strategy:
      fail-fast: true
      matrix:
        target:
          - os: linux
            arch: amd64
          - os: linux
            arch: arm64
        include:
          - target:
              os: linux
              arch: amd64
            builder: ubuntu-22.04
          - target:
              os: linux
              arch: arm64
            builder: buildjet-4vcpu-ubuntu-2204-arm

    name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }}
    runs-on: ${{ matrix.builder }}
    env:
      PLATFORM: ${{ format('{0}/{1}', 'linux', matrix.target.arch) }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Docker - Meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.DOCKER_REPO }}

      - name: Docker - Set up Buildx
        uses: docker/setup-buildx-action@v3

      - name: Docker - Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Docker - Build and Push by digest
        id: build
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ${{ env.DOCKER_FILE }}
          platforms: ${{ env.PLATFORM }}
          push: true
          build-args: |
            MAKE_PARALLEL=${{ env.MAKE_PARALLEL }}
            NIMFLAGS=${{ env.NIMFLAGS }}
            NAT_IP_AUTO=${{ env.NAT_IP_AUTO }}
          labels: ${{ steps.meta.outputs.labels }}
          outputs: type=image,name=${{ env.DOCKER_REPO }},push-by-digest=true,name-canonical=true,push=true

      - name: Docker - Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Docker - Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ matrix.target.arch }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

  # Publish multi-platform image
  publish:
    name: Publish multi-platform image
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.meta.outputs.version }}
    needs: build
    steps:
      - name: Docker - Variables
        run: |
          # Adjust custom suffix when set
          if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
            echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
          fi
          # Disable SHA tags on tagged release
          if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
            echo "TAG_SHA=false" >>$GITHUB_ENV
          fi
          # Handle latest and latest-custom using raw
          if [[ ${{ env.TAG_SHA }} == "false" ]]; then
            echo "TAG_LATEST=false" >>$GITHUB_ENV
            echo "TAG_RAW=true" >>$GITHUB_ENV
            if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
              echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
            else
              echo "TAG_RAW_VALUE=latest-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
            fi
          else
            echo "TAG_RAW=false" >>$GITHUB_ENV
          fi

      - name: Docker - Download digests
        uses: actions/download-artifact@v4
        with:
          pattern: digests-*
          merge-multiple: true
          path: /tmp/digests

      - name: Docker - Set up Buildx
        uses: docker/setup-buildx-action@v3

      - name: Docker - Meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.DOCKER_REPO }}
          flavor: |
            latest=${{ env.TAG_LATEST }}
            suffix=${{ env.TAG_SUFFIX }},onlatest=true
          tags: |
            type=semver,pattern={{version}}
            type=raw,enable=${{ env.TAG_RAW }},value=latest
            type=sha,enable=${{ env.TAG_SHA }}

      - name: Docker - Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Docker - Create manifest list and push
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)

      - name: Docker - Inspect image
        run: |
          docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}

  # Compute Continuous Tests inputs
  compute-tests-inputs:
    name: Compute Continuous Tests list
    if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
    runs-on: ubuntu-latest
    needs: publish
    outputs:
      source: ${{ steps.compute.outputs.source }}
      branch: ${{ steps.compute.outputs.branch }}
      codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }}
      nameprefix: ${{ steps.compute.outputs.nameprefix }}
      continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
      continuous_tests_duration: ${{ steps.compute.outputs.continuous_tests_duration }}
      continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
      workflow_source: ${{ steps.compute.outputs.workflow_source }}
    steps:
      - name: Compute Continuous Tests list
        id: compute
        run: |
          echo "source=${{ format('{0}/{1}', github.server_url, env.CONTINUOUS_TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
          echo "branch=${{ env.CONTINUOUS_TESTS_BRANCH }}" >> "$GITHUB_OUTPUT"
          echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"
          echo "nameprefix=$(awk '{ print tolower($0) }' <<< ${{ env.CONTINUOUS_TESTS_NAMEPREFIX }})" >> "$GITHUB_OUTPUT"
          echo "continuous_tests_list=$(jq -cR 'split(" ")' <<< '${{ env.CONTINUOUS_TESTS_LIST }}')" >> "$GITHUB_OUTPUT"
          echo "continuous_tests_duration=${{ env.CONTINUOUS_TESTS_DURATION }}" >> "$GITHUB_OUTPUT"
          echo "workflow_source=${{ env.CONTINUOUS_TESTS_SOURCE }}" >> "$GITHUB_OUTPUT"

  # Run Continuous Tests
  run-tests:
    name: Run Continuous Tests
    needs: [publish, compute-tests-inputs]
    strategy:
      max-parallel: 1
      matrix:
        tests: ${{ fromJSON(needs.compute-tests-inputs.outputs.continuous_tests_list) }}
    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
    with:
      source: ${{ needs.compute-tests-inputs.outputs.source }}
      branch: ${{ needs.compute-tests-inputs.outputs.branch }}
      codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
      nameprefix: ${{ needs.compute-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
      tests_filter: ${{ matrix.tests }}
      tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
      workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
    secrets: inherit
@@ -0,0 +1,28 @@
name: Docker

on:
  push:
    branches:
      - master
    tags:
      - 'v*.*.*'
    paths-ignore:
      - '**/*.md'
      - '.gitignore'
      - '.github/**'
      - '!.github/workflows/docker.yml'
      - '!.github/workflows/docker-reusable.yml'
      - 'docker/**'
      - '!docker/codex.Dockerfile'
      - '!docker/docker-entrypoint.sh'
  workflow_dispatch:

jobs:
  build-and-push:
    name: Build and Push
    uses: ./.github/workflows/docker-reusable.yml
    with:
      tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
    secrets: inherit
@@ -0,0 +1,65 @@
name: OpenAPI

on:
  push:
    branches:
      - 'master'
    paths:
      - 'openapi.yaml'
      - '.github/workflows/docs.yml'
  pull_request:
    branches:
      - '**'
    paths:
      - 'openapi.yaml'
      - '.github/workflows/docs.yml'

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: '0'

      - uses: actions/setup-node@v4
        with:
          node-version: 18

      - name: Lint OpenAPI
        shell: bash
        run: npx @redocly/cli lint openapi.yaml

  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/master'
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: '0'

      - uses: actions/setup-node@v4
        with:
          node-version: 18

      - name: Build OpenAPI
        shell: bash
        run: npx @redocly/cli build-docs openapi.yaml --output "openapi/index.html" --title "Codex API"

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: './openapi'

      - name: Deploy to GitHub Pages
        uses: actions/deploy-pages@v4
@@ -0,0 +1,30 @@
name: Nim matrix

on:
  merge_group:
  workflow_dispatch:

env:
  cache_nonce: 0 # Allows for easily busting actions/cache caches
  nim_version: pinned

jobs:
  matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.matrix.outputs.matrix }}
      cache_nonce: ${{ env.cache_nonce }}
    steps:
      - name: Compute matrix
        id: matrix
        uses: fabiocaccamo/create-matrix-action@v4
        with:
          matrix: |
            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}

  build:
    needs: matrix
    uses: ./.github/workflows/ci-reusable.yml
    with:
      matrix: ${{ needs.matrix.outputs.matrix }}
      cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}
@@ -0,0 +1,158 @@
name: Release

on:
  push:
    tags:
      - 'v*.*.*'
  workflow_dispatch:

env:
  cache_nonce: 0 # Allows for easily busting actions/cache caches
  nim_version: pinned
  rust_version: 1.78.0
  binary_base: codex
  build_dir: build
  nim_flags: '-d:verify_circuit=true'
  windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'

jobs:
  # Matrix
  matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.matrix.outputs.matrix }}
    steps:
      - name: Compute matrix
        id: matrix
        uses: fabiocaccamo/create-matrix-action@v4
        with:
          matrix: |
            os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
            os {linux}, cpu {arm64}, builder {buildjet-4vcpu-ubuntu-2204-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
            os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
            os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
            os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}

  # Build
  build:
    needs: matrix
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.matrix.outputs.matrix) }}

    defaults:
      run:
        shell: ${{ matrix.shell }} {0}

    name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}
    runs-on: ${{ matrix.builder }}
    timeout-minutes: 80
    steps:
      - name: Release - Checkout sources
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Release - Setup Nimbus Build System
        uses: ./.github/actions/nimbus-build-system
        with:
          os: ${{ matrix.os }}
          cpu: ${{ matrix.cpu }}
          shell: ${{ matrix.shell }}
          nim_version: ${{ matrix.nim_version }}
          rust_version: ${{ matrix.rust_version }}

      - name: Release - Compute binary name
        run: |
          case ${{ matrix.os }} in
            linux*)   os_name="linux"   ;;
            macos*)   os_name="darwin"  ;;
            windows*) os_name="windows" ;;
          esac
          binary="${{ env.binary_base }}-${{ github.ref_name }}-${os_name}-${{ matrix.cpu }}"
          [[ ${os_name} == "windows" ]] && binary="${binary}.exe"
          echo "binary=${binary}" >>$GITHUB_ENV

      - name: Release - Build
        run: |
          make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.binary }} ${{ env.nim_flags }}"

      - name: Release - Libraries
        run: |
          if [[ "${{ matrix.os }}" == "windows" ]]; then
            for lib in ${{ env.windows_libs }}; do
              cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
            done
          fi

      - name: Release - Upload build artifacts
        uses: actions/upload-artifact@v4
        with:
          name: release-${{ env.binary }}
          path: ${{ env.build_dir }}/
          retention-days: 1

  # Release
  release:
    runs-on: ubuntu-latest
    needs: build
    if: success() || failure()
    steps:
      - name: Release - Download binaries
        uses: actions/download-artifact@v4
        with:
          pattern: release*
          merge-multiple: true
          path: /tmp/release

      - name: Release - Compress and checksum
        run: |
          cd /tmp/release
          checksum() {
            arc="${1}"
            sha256sum "${arc}" >"${arc}.sha256"
          }

          # Compress and prepare
          for file in *; do
            # Exclude libraries
            if [[ "${file}" != *".dll"* ]]; then
              if [[ "${file}" == *".exe"* ]]; then
                # Windows - binary only
                arc="${file%.*}.zip"
                zip "${arc}" "${file}"
                checksum "${arc}"

                # Windows - binary and libs
                arc="${file%.*}-libs.zip"
                zip "${arc}" "${file}" ${{ env.windows_libs }}
                rm -f "${file}" ${{ env.windows_libs }}
                checksum "${arc}"
              else
                # Linux/macOS
                arc="${file}.tar.gz"
                chmod 755 "${file}"
                tar cfz "${arc}" "${file}"
                rm -f "${file}"
                checksum "${arc}"
              fi
            fi
          done

      - name: Release - Upload compressed artifacts and checksums
        uses: actions/upload-artifact@v4
        with:
          name: archives-and-checksums
          path: /tmp/release/
          retention-days: 1

      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            /tmp/release/*
          make_latest: true
@@ -3,6 +3,7 @@
 !*.*
 *.exe
+!LICENSE*
 !Makefile

 nimcache/
@@ -15,6 +16,8 @@ coverage/

 # Nimble packages
 /vendor/.nimble
+/vendor/packages/
+# /vendor/*/

 # Nimble user files
 nimble.develop
@@ -23,6 +26,9 @@ nimble.paths
 # vscode
 .vscode
+
+# JetBrains IDEs
+.idea

 # Each developer can create a personal .env file with
 # local settings overrides (e.g. WEB3_URL)
 .env
@@ -30,3 +36,8 @@ nimble.paths
 .update.timestamp
 codex.nims
 nimbus-build-system.paths
+docker/hostdatadir
+docker/prometheus-data
+.DS_Store
+nim.cfg
+tests/integration/logs
@@ -5,7 +5,7 @@
   branch = master
 [submodule "vendor/nim-libp2p"]
   path = vendor/nim-libp2p
-  url = https://github.com/status-im/nim-libp2p.git
+  url = https://github.com/vacp2p/nim-libp2p.git
   ignore = untracked
   branch = master
 [submodule "vendor/nimcrypto"]
@@ -133,10 +133,6 @@
   url = https://github.com/status-im/nim-websock.git
   ignore = untracked
   branch = master
-[submodule "vendor/dagger-contracts"]
-  path = vendor/dagger-contracts
-  url = https://github.com/status-im/dagger-contracts
-  ignore = dirty
 [submodule "vendor/nim-contract-abi"]
   path = vendor/nim-contract-abi
   url = https://github.com/status-im/nim-contract-abi
@@ -168,9 +164,9 @@
 [submodule "vendor/nim-leopard"]
   path = vendor/nim-leopard
   url = https://github.com/status-im/nim-leopard.git
-[submodule "vendor/nim-libp2p-dht"]
-  path = vendor/nim-libp2p-dht
-  url = https://github.com/status-im/nim-libp2p-dht.git
+[submodule "vendor/nim-codex-dht"]
+  path = vendor/nim-codex-dht
+  url = https://github.com/codex-storage/nim-codex-dht.git
   ignore = untracked
   branch = master
 [submodule "vendor/nim-datastore"]
@@ -182,3 +178,40 @@
 [submodule "vendor/nim-eth"]
   path = vendor/nim-eth
   url = https://github.com/status-im/nim-eth
+[submodule "vendor/codex-contracts-eth"]
+  path = vendor/codex-contracts-eth
+  url = https://github.com/status-im/codex-contracts-eth
+[submodule "vendor/nim-protobuf-serialization"]
+  path = vendor/nim-protobuf-serialization
+  url = https://github.com/status-im/nim-protobuf-serialization
+[submodule "vendor/nim-results"]
+  path = vendor/nim-results
+  url = https://github.com/arnetheduck/nim-results
+[submodule "vendor/nim-testutils"]
+  path = vendor/nim-testutils
+  url = https://github.com/status-im/nim-testutils
+[submodule "vendor/npeg"]
+  path = vendor/npeg
+  url = https://github.com/zevv/npeg
+[submodule "vendor/nim-poseidon2"]
+  path = vendor/nim-poseidon2
+  url = https://github.com/codex-storage/nim-poseidon2.git
+[submodule "vendor/constantine"]
+  path = vendor/constantine
+  url = https://github.com/mratsim/constantine.git
+[submodule "vendor/nim-circom-compat"]
+  path = vendor/nim-circom-compat
+  url = https://github.com/codex-storage/nim-circom-compat.git
+  ignore = untracked
+  branch = master
+[submodule "vendor/codex-storage-proofs-circuits"]
+  path = vendor/codex-storage-proofs-circuits
+  url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
+  ignore = untracked
+  branch = master
+[submodule "vendor/nim-serde"]
+  path = vendor/nim-serde
+  url = https://github.com/codex-storage/nim-serde.git
+[submodule "vendor/nim-leveldbstatic"]
+  path = vendor/nim-leveldbstatic
+  url = https://github.com/codex-storage/nim-leveldb.git
BUILDING.md (135 changed lines)
@@ -18,45 +18,57 @@

 To build nim-codex, developer tools need to be installed and accessible in the OS.

-Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/status-im/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work.
+Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/codex-storage/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work.

 Other approaches may be viable. On macOS, some users may prefer [MacPorts](https://www.macports.org/) to [Homebrew](https://brew.sh/). On Windows, rather than use MSYS2, some users may prefer to install developer tools with [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/), [Scoop](https://scoop.sh/), or [Chocolatey](https://chocolatey.org/), or download installers for e.g. Make and CMake while otherwise relying on official Windows developer tools. Community contributions to these docs and our build system are welcome!

+### Rust
+
+The current implementation of Codex's zero-knowledge proving circuit requires the installation of Rust v1.76.0 or greater. Be sure to install it for your OS and add it to your terminal's path such that the command `cargo --version` gives a compatible version.
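For example, you can verify the toolchain on your `PATH` before building (the comment shows the kind of output to expect; any version at or above 1.76.0 is fine):

```shell
cargo --version
# expect output like: cargo 1.76.0 (...)
```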
 ### Linux

 *Package manager commands may require `sudo` depending on OS setup.*

 On a bare bones installation of Debian (or a distribution derived from Debian, such as Ubuntu), run

-```text
-$ apt-get update && apt-get install build-essential cmake curl git
+```shell
+apt-get update && apt-get install build-essential cmake curl git rustc cargo
 ```

 Non-Debian distributions have different package managers: `apk`, `dnf`, `pacman`, `rpm`, `yum`, etc.

 For example, on a bare bones installation of Fedora, run

-```text
-$ dnf install @development-tools cmake gcc-c++ which
+```shell
+dnf install @development-tools cmake gcc-c++ rust cargo
+```
+
+In case your distribution does not provide the required Rust version, you may install it using [rustup](https://www.rust-lang.org/tools/install)
+```shell
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=1.76.0 -y
+
+. "$HOME/.cargo/env"
 ```

 ### macOS

 Install the [Xcode Command Line Tools](https://mac.install.guide/commandlinetools/index.html) by opening a terminal and running
-```text
-$ xcode-select --install
+```shell
+xcode-select --install
 ```

 Install [Homebrew (`brew`)](https://brew.sh/) and in a new terminal run
-```text
-$ brew install bash cmake
+```shell
+brew install bash cmake rust
 ```

 Check that `PATH` is set up correctly
-```text
-$ which bash cmake
-/usr/local/bin/bash
-/usr/local/bin/cmake
+```shell
+which bash cmake
+# /usr/local/bin/bash
+# /usr/local/bin/cmake
 ```

 ### Windows + MSYS2

@@ -68,14 +80,40 @@ Download and run the installer from [msys2.org](https://www.msys2.org/).

 Launch an MSYS2 [environment](https://www.msys2.org/docs/environments/). UCRT64 is generally recommended: from the Windows *Start menu* select `MSYS2 MinGW UCRT x64`.

 Assuming a UCRT64 environment, in Bash run
-```text
-$ pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake
+```shell
+pacman -Suy
+pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-rust
 ```

 <!-- #### Headless Windows container -->
 <!-- add instructions re: getting setup with MSYS2 in a Windows container -->
 <!-- https://github.com/StefanScherer/windows-docker-machine -->

+#### Optional: VSCode Terminal integration
+
+You can link the MSYS2-UCRT64 terminal into VSCode by modifying the configuration file as shown below.
+File: `C:/Users/<username>/AppData/Roaming/Code/User/settings.json`
+```json
+{
+  ...
+  "terminal.integrated.profiles.windows": {
+    ...
+    "MSYS2-UCRT64": {
+      "path": "C:\\msys64\\usr\\bin\\bash.exe",
+      "args": [
+        "--login",
+        "-i"
+      ],
+      "env": {
+        "MSYSTEM": "UCRT64",
+        "CHERE_INVOKING": "1",
+        "MSYS2_PATH_TYPE": "inherit"
+      }
+    }
+  }
+}
+```
+
 ### Other

 It is possible that nim-codex can be built and run on other platforms supported by the [Nim](https://nim-lang.org/) language: BSD family, older versions of Windows, etc. There has not been sufficient experimentation with nim-codex on such platforms, so instructions are not provided. Community contributions to these docs and our build system are welcome!

@@ -83,30 +121,30 @@ It is possible that nim-codex can be built and run on other platforms supported
 ## Repository

 In Bash run
-```text
-$ git clone https://github.com/status-im/nim-codex.git repos/nim-codex && cd repos/nim-codex
+```shell
+git clone https://github.com/codex-storage/nim-codex.git repos/nim-codex && cd repos/nim-codex
 ```

-nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system#readme), so next run
-```text
-$ make update
+nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system), so next run
+```shell
+make update
 ```

 This step can take a while to complete because by default it builds the [Nim compiler](https://nim-lang.org/docs/nimc.html).

 To see more output from `make` pass `V=1`. This works for all `make` targets in projects using the nimbus-build-system
-```text
-$ make V=1 update
+```shell
+make V=1 update
 ```

 ## Executable

 In Bash run
-```text
-$ make exec
+```shell
+make
 ```

-The `exec` target creates the `build/codex` executable.
+The default `make` target creates the `build/codex` executable.
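As a quick sanity check after building, the binary can be invoked directly; `--help` is assumed here to print the available CLI options:

```shell
./build/codex --help
```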
 ## Example usage

@@ -115,29 +153,40 @@ See the [instructions](README.md#cli-options) in the main readme.

 ## Tests

 In Bash run
-```text
-$ make test
+```shell
+make test
 ```

 ### testAll

+#### Prerequisites
+
+To run the integration tests, an Ethereum test node is required. Follow these instructions to set it up.
+
+##### Windows (do this before 'All platforms')
+
+1. Download and install Visual Studio 2017 or newer. (Not VSCode!) In the Workloads overview, enable `Desktop development with C++`. ( https://visualstudio.microsoft.com )
+
+##### All platforms
+
+1. Install Node.js (tested with v18.14.0); consider using [Node Version Manager (`nvm`)](https://github.com/nvm-sh/nvm#readme) as a version manager.
+1. Open a terminal.
+1. Go to the vendor/codex-contracts-eth folder: `cd /<git-root>/vendor/codex-contracts-eth/`
+1. `npm install` -> Should complete with the number of packages added and an overview of known vulnerabilities.
+1. `npm test` -> Should output test results. May take a minute.
+
+Before the integration tests are started, you must start the Ethereum test node manually (a consolidated sketch follows this list).
+1. Open a terminal.
+1. Go to the vendor/codex-contracts-eth folder: `cd /<git-root>/vendor/codex-contracts-eth/`
+1. `npm start` -> This should launch Hardhat, and output a number of keys and a warning message.
|
||||||
|
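For convenience, the same setup can be run as one Bash sequence (a sketch assuming the repository layout above; replace `<git-root>` with the path to your nim-codex clone):

```shell
cd <git-root>/vendor/codex-contracts-eth
npm install   # installs the contract dependencies
npm test      # optional sanity check of the contracts
npm start     # leave running: launches the Hardhat Ethereum test node
```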
#### Run

The `testAll` target runs the same tests as `make test` and also runs tests for nim-codex's Ethereum contracts, as well as a basic suite of integration tests.

To run `make testAll`, use a new terminal to run:

```shell
make testAll
```
@ -0,0 +1,201 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2024 Codex Storage

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,19 @@

The MIT License (MIT)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Makefile

@ -5,6 +5,30 @@

# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
# This is the Nim version used locally and in regular CI builds.
# Can be a specific version tag, a branch name, or a commit hash.
# Can be overridden by setting the NIM_COMMIT environment variable
# before calling make.
#
# For readability in CI, if NIM_COMMIT is set to "pinned",
# this will also default to the version pinned here.
#
# If NIM_COMMIT is set to "nimbusbuild", this will use the
# version pinned by nimbus-build-system.
PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21

ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION)
else ifeq ($(NIM_COMMIT),pinned)
NIM_COMMIT := $(PINNED_NIM_VERSION)
endif

ifeq ($(NIM_COMMIT),nimbusbuild)
undefine NIM_COMMIT
else
export NIM_COMMIT
endif
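# Illustrative usage of the pin logic above (these invocations are an
# editorial sketch, not targets defined in this file):
#   make update                          # builds with PINNED_NIM_VERSION
#   NIM_COMMIT=pinned make update        # same pin, spelled out for CI logs
#   NIM_COMMIT=nimbusbuild make update   # defer to nimbus-build-system's pin
#   NIM_COMMIT=version-1-6 make update   # any branch, tag, or commit hash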
SHELL := bash # the shell used internally by Make

# used inside the included makefiles

@ -44,7 +68,11 @@ GIT_SUBMODULE_UPDATE := git submodule update --init --recursive

else # "variables.mk" was included. Business as usual until the end of this file.

# default target, because it's the first one that doesn't start with '.'
# Builds the codex binary
all: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims

# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
@ -56,15 +84,12 @@ else

NIM_PARAMS := $(NIM_PARAMS) -d:release
endif

deps: | deps-common nat-libs
ifneq ($(USE_LIBBACKTRACE), 0)
deps: | libbacktrace
endif

update: | update-common

# detecting the os
ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10...
@ -79,31 +104,27 @@ endif

# Builds and run a part of the test suite
test: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims

# Builds and runs the smart contract tests
testContracts: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims

# Builds and runs the integration tests
testIntegration: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims

# Builds and runs all tests (except for Taiko L2 tests)
testAll: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims

# Builds and runs Taiko L2 tests
testTaiko: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) codex.nims

# nim-libbacktrace
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0

@ -128,8 +149,15 @@ coverage:

	shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
	echo -e $(BUILD_MSG) "coverage/report/index.html"
	genhtml coverage/coverage.f.info --output-directory coverage/report

show-coverage:
	if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi

coverage-script: build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim coverage $(NIM_PARAMS) build.nims
	echo "Run `make show-coverage` to view coverage results"
# usual cleaning
clean: | clean-common
	rm -rf build

README.md
@ -7,9 +7,11 @@

[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run

@ -19,7 +21,7 @@ For detailed instructions on preparing to build nim-codex see [*Building Codex*]

To build the project, clone it and run:

```bash
make update && make
```

The executable will be placed under the `build` directory under the project root.

@ -29,6 +31,35 @@ Run the client with:

```bash
build/codex
```
## Configuration

It is possible to configure a Codex node in several ways:
1. CLI options
2. Environment variables
3. Configuration file

The order of priority is the same as above: CLI options > environment variables > configuration file values.
### Environment variables

In order to set a configuration option using environment variables, first find the desired CLI option
and then transform it in the following way:

1. prepend it with `CODEX_`
2. make it uppercase
3. replace `-` with `_`

For example, to configure `--log-level`, use `CODEX_LOG_LEVEL` as the environment variable name, as sketched below.
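A minimal sketch, assuming a Bash shell and the `build/codex` binary built earlier; the mappings follow the three rules above:

```bash
# --log-level  ->  CODEX_LOG_LEVEL
CODEX_LOG_LEVEL=trace build/codex

# --api-port   ->  CODEX_API_PORT (option name taken from the CLI list below)
CODEX_API_PORT=8081 build/codex
```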
### Configuration file

A [TOML](https://toml.io/en/) configuration file can also be used to set configuration values. Configuration option names and corresponding values are placed in the file, separated by `=`. Configuration option names can be obtained from the `codex --help` command, and should not include the `--` prefix. For example, a node's log level (`--log-level`) can be configured using TOML as follows:

```toml
log-level = "trace"
```

The Codex node can then read the configuration from this file using the `--config-file` CLI parameter, like `codex --config-file=/path/to/your/config.toml`.
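Putting the two together, a hedged end-to-end sketch (the file path and the extra option are illustrative; option names come from the CLI list below):

```bash
# Write a small config file and point the node at it
cat > /tmp/codex.toml <<'EOF'
log-level = "trace"
metrics = true
EOF
build/codex --config-file=/tmp/codex.toml
```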
### CLI Options

@ -40,104 +71,77 @@ codex [OPTIONS]... command

The following options are available:

 --config-file           Loads the configuration from a TOML file [=none].
 --log-level             Sets the log level [=info].
 --metrics               Enable the metrics server [=false].
 --metrics-address       Listening address of the metrics server [=127.0.0.1].
 --metrics-port          Listening HTTP port of the metrics server [=8008].
 -d, --data-dir          The directory where codex will store configuration and data.
 -i, --listen-addrs      Multi Addresses to listen on [=/ip4/0.0.0.0/tcp/0].
 -a, --nat               IP Addresses to announce behind a NAT [=127.0.0.1].
 -e, --disc-ip           Discovery listen address [=0.0.0.0].
 -u, --disc-port         Discovery (UDP) port [=8090].
 --net-privkey           Source of network (secp256k1) private key file path or name [=key].
 -b, --bootstrap-node    Specifies one or more bootstrap nodes to use when connecting to the network.
 --max-peers             The maximum number of peers to connect to [=160].
 --agent-string          Node agent string which is used as identifier in network [=Codex].
 --api-bindaddr          The REST API bind address [=127.0.0.1].
 -p, --api-port          The REST Api port [=8080].
 --repo-kind             Backend for main repo store (fs, sqlite) [=fs].
 -q, --storage-quota     The size of the total storage quota dedicated to the node [=8589934592].
 -t, --block-ttl         Default block timeout in seconds - 0 disables the ttl [=$DefaultBlockTtl].
 --block-mi              Time interval in seconds - determines frequency of block maintenance cycle:
                         how often blocks are checked for expiration and cleanup
                         [=$DefaultBlockMaintenanceInterval].
 --block-mn              Number of blocks to check every maintenance cycle [=1000].
 -c, --cache-size        The size of the block cache, 0 disables the cache - might help on slow
                         hard drives [=0].

Available sub-commands:

codex persistence [OPTIONS]... command

The following options are available:

 --eth-provider          The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545].
 --eth-account           The Ethereum account that is used for storage contracts.
 --eth-private-key       File containing Ethereum private key for storage contracts.
 --marketplace-address   Address of deployed Marketplace contract.
 --validator             Enables validator, requires an Ethereum node [=false].
 --validator-max-slots   Maximum number of slots that the validator monitors [=1000].

Available sub-commands:

codex persistence prover [OPTIONS]...

The following options are available:

 --circom-r1cs           The r1cs file for the storage circuit.
 --circom-wasm           The wasm file for the storage circuit.
 --circom-zkey           The zkey file for the storage circuit.
 --circom-no-zkey        Ignore the zkey file - use only for testing! [=false].
 --proof-samples         Number of samples to prove [=5].
 --max-slot-depth        The maximum depth of the slot tree [=32].
 --max-dataset-depth     The maximum depth of the dataset tree [=8].
 --max-block-depth       The maximum depth of the network block merkle tree [=5].
 --max-cell-elements     The maximum number of elements in a cell [=67].
```
#### Logging

Codex uses the [Chronicles](https://github.com/status-im/nim-chronicles) logging library, which allows great flexibility in working with logs.
Chronicles has the concept of topics, which categorize log entries into semantic groups.

Using the `log-level` parameter, you can set the top-level log level like `--log-level="trace"`, but more importantly,
you can set log levels for specific topics like `--log-level="info; trace: marketplace,node; error: blockexchange"`,
which sets the top-level log level to `info` and then, for the topics `marketplace` and `node`, sets the level to `trace`, and so on; see the sketch below.
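For instance (a sketch; the topic names are the ones quoted above):

```bash
# Top-level log level only
build/codex --log-level=trace

# Per-topic levels: info by default, trace for marketplace/node, error for blockexchange
build/codex --log-level="info; trace: marketplace,node; error: blockexchange"
```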
### Guides

To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](docs/TwoClientTest.md) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](docs/Marketplace.md) using a local blockchain as well.

## API

The client exposes a REST API that can be used to interact with the client. An overview of the API can be found on [api.codex.storage](https://api.codex.storage).
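As a quick illustration, the endpoint sketches below are taken from the pre-merge readme in this same commit; the API may have evolved, so defer to [api.codex.storage](https://api.codex.storage) for the authoritative reference:

```bash
# Get node info such as its peer id, addresses and SPR
curl "127.0.0.1:8080/api/codex/v1/info"

# Upload a file; upon success the Cid of the uploaded file is returned
curl -H "content-type: application/octet-stream" -H Expect: -T "<path to file>" "127.0.0.1:8080/api/codex/v1/upload" -X POST

# Download data identified by a Cid
curl "127.0.0.1:8080/api/codex/v1/download/<Cid of the content>" --output <name of output file>
```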
@ -0,0 +1,2 @@

ceremony
circuit_bench_*

@ -0,0 +1,33 @@
## Benchmark Runner

Modify the `runAllBenchmarks` proc in `run_benchmarks.nim` to the desired parameters and variations.

Then run it:

```sh
nim c -r run_benchmarks
```

By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:

    nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3

Generating the circuit files often takes longer than running the benchmarks, so caching the results allows re-running the benchmark as needed.

You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchmarks` to suit your needs; see `create_circuits.nim` for their definitions, and the sketch below.

The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit include paths, etc. `CircuitEnv` sets all of this.
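A hedged sketch of such a variation; the fields are those of `CircuitArgs` as defined in `create_circuits.nim` (shown later in this commit), and the values mirror the example folder name above:

```nim
# Illustrative only: one CircuitArgs combination for runAllBenchmarks
let args = CircuitArgs(
  depth: 32, maxslots: 256, cellsize: 2048, blocksize: 65536,
  nsamples: 9, entropy: 1234567, seed: 12345,
  nslots: 11, ncells: 512, index: 3,
)
```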
## Codex Ark Circom CLI

Runs Codex's prover setup with Ark / Circom.

Compile:

```sh
nim c codex_ark_prover_cli.nim
```

Run the resulting binary to see usage:

```sh
./codex_ark_prover_cli -h
```
@ -0,0 +1,15 @@

--path:".."
--path:"../tests"
--threads:on
--tlsEmulation:off
--d:release

# when not defined(chronicles_log_level):
#   --define:"chronicles_log_level:NONE" # compile all log statements
#   --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime
#   --"import":"logging" # ensure that logging is ignored at runtime
@ -0,0 +1,187 @@
|
||||||
|
import std/[hashes, json, strutils, strformat, os, osproc, uri]
|
||||||
|
|
||||||
|
import ./utils
|
||||||
|
|
||||||
|
type
|
||||||
|
CircuitEnv* = object
|
||||||
|
nimCircuitCli*: string
|
||||||
|
circuitDirIncludes*: string
|
||||||
|
ptauPath*: string
|
||||||
|
ptauUrl*: Uri
|
||||||
|
codexProjDir*: string
|
||||||
|
|
||||||
|
CircuitArgs* = object
|
||||||
|
depth*: int
|
||||||
|
maxslots*: int
|
||||||
|
cellsize*: int
|
||||||
|
blocksize*: int
|
||||||
|
nsamples*: int
|
||||||
|
entropy*: int
|
||||||
|
seed*: int
|
||||||
|
nslots*: int
|
||||||
|
ncells*: int
|
||||||
|
index*: int
|
||||||
|
|
||||||
|
proc findCodexProjectDir(): string =
|
||||||
|
## find codex proj dir -- assumes this script is in codex/benchmarks
|
||||||
|
result = currentSourcePath().parentDir.parentDir
|
||||||
|
|
||||||
|
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
|
||||||
|
let codexDir = findCodexProjectDir()
|
||||||
|
result.nimCircuitCli =
|
||||||
|
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
|
||||||
|
"proof_input" / "cli"
|
||||||
|
result.circuitDirIncludes =
|
||||||
|
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
|
||||||
|
result.ptauPath =
|
||||||
|
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
|
||||||
|
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
|
||||||
|
result.codexProjDir = codexDir
|
||||||
|
|
||||||
|
proc check*(env: var CircuitEnv) =
|
||||||
|
## check that the CWD of script is in the codex parent
|
||||||
|
let codexProjDir = findCodexProjectDir()
|
||||||
|
echo "\n\nFound project dir: ", codexProjDir
|
||||||
|
|
||||||
|
let snarkjs = findExe("snarkjs")
|
||||||
|
if snarkjs == "":
|
||||||
|
echo dedent"""
|
||||||
|
ERROR: must install snarkjs first
|
||||||
|
|
||||||
|
npm install -g snarkjs@latest
|
||||||
|
"""
|
||||||
|
|
||||||
|
let circom = findExe("circom")
|
||||||
|
if circom == "":
|
||||||
|
echo dedent"""
|
||||||
|
ERROR: must install circom first
|
||||||
|
|
||||||
|
git clone https://github.com/iden3/circom.git
|
||||||
|
cargo install --path circom
|
||||||
|
"""
|
||||||
|
|
||||||
|
if snarkjs == "" or circom == "":
|
||||||
|
quit 2
|
||||||
|
|
||||||
|
echo "Found SnarkJS: ", snarkjs
|
||||||
|
echo "Found Circom: ", circom
|
||||||
|
|
||||||
|
if not env.nimCircuitCli.fileExists:
|
||||||
|
echo "Nim Circuit reference cli not found: ", env.nimCircuitCli
|
||||||
|
echo "Building Circuit reference cli...\n"
|
||||||
|
withDir env.nimCircuitCli.parentDir:
|
||||||
|
runit "nimble build -d:release --styleCheck:off cli"
|
||||||
|
echo "CWD: ", getCurrentDir()
|
||||||
|
assert env.nimCircuitCli.fileExists()
|
||||||
|
|
||||||
|
echo "Found NimCircuitCli: ", env.nimCircuitCli
|
||||||
|
echo "Found Circuit Path: ", env.circuitDirIncludes
|
||||||
|
echo "Found PTAU file: ", env.ptauPath
|
||||||
|
|
||||||
|
proc downloadPtau*(ptauPath: string, ptauUrl: Uri) =
|
||||||
|
## download ptau file using curl if needed
|
||||||
|
if not ptauPath.fileExists:
|
||||||
|
echo "Ceremony file not found, downloading..."
|
||||||
|
createDir ptauPath.parentDir
|
||||||
|
withDir ptauPath.parentDir:
|
||||||
|
runit fmt"curl --output '{ptauPath}' '{$ptauUrl}/{ptauPath.splitPath().tail}'"
|
||||||
|
else:
|
||||||
|
echo "Found PTAU file at: ", ptauPath
|
||||||
|
|
||||||
|
proc getCircuitBenchStr*(args: CircuitArgs): string =
|
||||||
|
for f, v in fieldPairs(args):
|
||||||
|
result &= "_" & f & $v
|
||||||
|
|
||||||
|
proc getCircuitBenchPath*(args: CircuitArgs, env: CircuitEnv): string =
|
||||||
|
## generate folder name for unique circuit args
|
||||||
|
result = env.codexProjDir / "benchmarks/circuit_bench" & getCircuitBenchStr(args)
|
||||||
|
|
||||||
|
proc generateCircomAndSamples*(args: CircuitArgs, env: CircuitEnv, name: string) =
|
||||||
|
## run nim circuit and sample generator
|
||||||
|
var cliCmd = env.nimCircuitCli
|
||||||
|
for f, v in fieldPairs(args):
|
||||||
|
cliCmd &= " --" & f & "=" & $v
|
||||||
|
|
||||||
|
if not "input.json".fileExists:
|
||||||
|
echo "Generating Circom Files..."
|
||||||
|
runit fmt"{cliCmd} -v --circom={name}.circom --output=input.json"
|
||||||
|
|
||||||
|
proc createCircuit*(
|
||||||
|
args: CircuitArgs,
|
||||||
|
env: CircuitEnv,
|
||||||
|
name = "proof_main",
|
||||||
|
circBenchDir = getCircuitBenchPath(args, env),
|
||||||
|
someEntropy = "some_entropy_75289v3b7rcawcsyiur",
|
||||||
|
doGenerateWitness = false,
|
||||||
|
): tuple[dir: string, name: string] =
|
||||||
|
## Generates all the files needed for to run a proof circuit. Downloads the PTAU file if needed.
|
||||||
|
##
|
||||||
|
## All needed circuit files will be generated as needed.
|
||||||
|
## They will be located in `circBenchDir` which defaults to a folder like:
|
||||||
|
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
|
||||||
|
## with all the given CircuitArgs.
|
||||||
|
##
|
||||||
|
  let circdir = circBenchDir

  downloadPtau env.ptauPath, env.ptauUrl

  echo "Creating circuit dir: ", circdir
  createDir circdir
  withDir circdir:
    writeFile("circuit_params.json", pretty(%*args))
    let
      inputs = circdir / "input.json"
      zkey = circdir / fmt"{name}.zkey"
      wasm = circdir / fmt"{name}.wasm"
      r1cs = circdir / fmt"{name}.r1cs"
      wtns = circdir / fmt"{name}.wtns"

    generateCircomAndSamples(args, env, name)

    if not wasm.fileExists or not r1cs.fileExists:
      runit fmt"circom --r1cs --wasm --O2 -l{env.circuitDirIncludes} {name}.circom"
      moveFile fmt"{name}_js" / fmt"{name}.wasm", fmt"{name}.wasm"
    echo "Found wasm: ", wasm
    echo "Found r1cs: ", r1cs

    if not zkey.fileExists:
      echo "ZKey not found, generating..."
      putEnv "NODE_OPTIONS", "--max-old-space-size=8192"
      if not fmt"{name}_0000.zkey".fileExists:
        runit fmt"snarkjs groth16 setup {r1cs} {env.ptauPath} {name}_0000.zkey"
        echo fmt"Generated {name}_0000.zkey"

      let cmd =
        fmt"snarkjs zkey contribute {name}_0000.zkey {name}_0001.zkey --name='1st Contributor Name'"
      echo "CMD: ", cmd
      let cmdRes = execCmdEx(cmd, options = {}, input = someEntropy & "\n")
      assert cmdRes.exitCode == 0

      moveFile fmt"{name}_0001.zkey", fmt"{name}.zkey"
      removeFile fmt"{name}_0000.zkey"

    if not wtns.fileExists and doGenerateWitness:
      runit fmt"node generate_witness.js {wtns} ../input.json ../witness.wtns"

  return (circdir, name)

when isMainModule:
  echo "findCodexProjectDir: ", findCodexProjectDir()
  ## test run creating a circuit
  var env = CircuitEnv.default()
  env.check()

  let args = CircuitArgs(
    depth: 32, # maximum depth of the slot tree
    maxslots: 256, # maximum number of slots
    cellsize: 2048, # cell size in bytes
    blocksize: 65536, # block size in bytes
    nsamples: 5, # number of samples to prove
    entropy: 1234567, # external randomness
    seed: 12345, # seed for creating fake data
    nslots: 11, # number of slots in the dataset
    index: 3, # which slot we prove (0..NSLOTS-1)
    ncells: 512, # number of cells in this slot
  )
  let benchenv = createCircuit(args, env)
  echo "\nBench dir:\n", benchenv
@ -0,0 +1,105 @@
import std/[sequtils, strformat, os, options, importutils]
import std/[times, os, strutils, terminal]

import pkg/questionable
import pkg/questionable/results
import pkg/datastore

import pkg/codex/[rng, stores, merkletree, codextypes, slots]
import pkg/codex/utils/[json, poseidon2digest]
import pkg/codex/slots/[builder, sampler/utils, backends/helpers]
import pkg/constantine/math/[arithmetic, io/io_bigints, io/io_fields]

import ./utils
import ./create_circuits

type CircuitFiles* = object
  r1cs*: string
  wasm*: string
  zkey*: string
  inputs*: string

proc runArkCircom(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
  echo "Loading sample proof..."
  var
    inputData = files.inputs.readFile()
    inputJson = !JsonNode.parse(inputData)
    proofInputs = Poseidon2Hash.jsonToProofInput(inputJson)
    circom = CircomCompat.init(
      files.r1cs,
      files.wasm,
      files.zkey,
      slotDepth = args.depth,
      numSamples = args.nsamples,
    )
  defer:
    circom.release() # this comes from the rust FFI

  echo "Sample proof loaded..."
  echo "Proving..."

  let nameArgs = getCircuitBenchStr(args)
  var proof: CircomProof
  benchmark fmt"prover-{nameArgs}", benchmarkLoops:
    proof = circom.prove(proofInputs).tryGet

  var verRes: bool
  benchmark fmt"verify-{nameArgs}", benchmarkLoops:
    verRes = circom.verify(proof, proofInputs).tryGet
  echo "verify result: ", verRes

proc runRapidSnark(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
  # time rapidsnark ${CIRCUIT_MAIN}.zkey witness.wtns proof.json public.json
  echo "generating the witness..."
  ## TODO

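# A hedged sketch of what the TODO above could become, assuming a rapidsnark
# binary on PATH and a witness file produced beforehand (this follows the
# command quoted in the comment above, not a confirmed implementation):
#   benchmark fmt"rapidsnark-{getCircuitBenchStr(args)}", benchmarkLoops:
#     runit fmt"rapidsnark {files.zkey} witness.wtns proof.json public.json"
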
proc runBenchmark(args: CircuitArgs, env: CircuitEnv, benchmarkLoops: int) =
  ## execute benchmarks given a set of args
  ## will create a folder in `benchmarks/circuit_bench_$(args)`
  ##
  let env = createCircuit(args, env)

  ## TODO: copy over testcircomcompat proving
  let files = CircuitFiles(
    r1cs: env.dir / fmt"{env.name}.r1cs",
    wasm: env.dir / fmt"{env.name}.wasm",
    zkey: env.dir / fmt"{env.name}.zkey",
    inputs: env.dir / fmt"input.json",
  )

  runArkCircom(args, files, benchmarkLoops)

proc runAllBenchmarks*() =
  echo "Running benchmark"
  # setup()
  var env = CircuitEnv.default()
  env.check()

  var args = CircuitArgs(
    depth: 32, # maximum depth of the slot tree
    maxslots: 256, # maximum number of slots
    cellsize: 2048, # cell size in bytes
    blocksize: 65536, # block size in bytes
    nsamples: 1, # number of samples to prove
    entropy: 1234567, # external randomness
    seed: 12345, # seed for creating fake data
    nslots: 11, # number of slots in the dataset
    index: 3, # which slot we prove (0..NSLOTS-1)
    ncells: 512, # number of cells in this slot
  )

  let
    numberSamples = 3
    benchmarkLoops = 5

  for i in 1 .. numberSamples:
    args.nsamples = i
    stdout.styledWriteLine(fgYellow, "\nbenchmarking args: ", $args)
    runBenchmark(args, env, benchmarkLoops)

  printBenchMarkSummaries()

when isMainModule:
  runAllBenchmarks()
@ -0,0 +1,76 @@
import std/tables

template withDir*(dir: string, blk: untyped) =
  ## set working dir for duration of blk
  let prev = getCurrentDir()
  try:
    setCurrentDir(dir)
    `blk`
  finally:
    setCurrentDir(prev)

template runit*(cmd: string) =
  ## run shell commands and verify it runs without an error code
  echo "RUNNING: ", cmd
  let cmdRes = execShellCmd(cmd)
  echo "STATUS: ", cmdRes
  assert cmdRes == 0

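# Minimal usage sketch (illustration only; both templates expand at the call
# site, so std/os must be imported there, and a POSIX shell is assumed):
when isMainModule:
  import std/os
  withDir getTempDir():
    runit "pwd" # prints RUNNING/STATUS lines and asserts a zero exit code
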
var benchRuns* = newTable[string, tuple[avgTimeSec: float, count: int]]()

func avg(vals: openArray[float]): float =
  for v in vals:
    result += v / vals.len().toFloat()

template benchmark*(name: untyped, count: int, blk: untyped) =
  ## simple benchmarking of a block of code
  let benchmarkName: string = name
  var runs = newSeqOfCap[float](count)
  for i in 1 .. count:
    block:
      let t0 = epochTime()
      `blk`
      let elapsed = epochTime() - t0
      runs.add elapsed

  var elapsedStr = ""
  for v in runs:
    elapsedStr &= ", " & v.formatFloat(format = ffDecimal, precision = 3)
  stdout.styledWriteLine(
    fgGreen, "CPU Time [", benchmarkName, "] ", "avg(", $count, "): ", elapsedStr, " s"
  )
  benchRuns[benchmarkName] = (runs.avg(), count)

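# Usage sketch (illustration only; epochTime, formatFloat and styledWriteLine
# are resolved at the expansion site, hence the imports inside the block):
when isMainModule:
  import std/[os, times, strutils, terminal]
  benchmark "sleep-demo", 3:
    sleep(10) # the timed body; per-run times are printed, the average recorded
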
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
  if printRegular:
    echo ""
    for k, v in benchRuns:
      echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs for ", k

  if printTsv:
    echo ""
    echo "name", "\t", "avgTimeSec", "\t", "count"
    for k, v in benchRuns:
      echo k, "\t", v.avgTimeSec, "\t", v.count


import std/math

func floorLog2*(x: int): int =
  var k = -1
  var y = x
  while (y > 0):
    k += 1
    y = y shr 1
  return k

func ceilingLog2*(x: int): int =
  if (x == 0):
    return -1
  else:
    return (floorLog2(x - 1) + 1)

func checkPowerOfTwo*(x: int, what: string): int =
  let k = ceilingLog2(x)
  assert(x == 2 ^ k, ("`" & what & "` is expected to be a power of 2"))
  return x
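
# Quick self-checks for the helpers above (illustrative):
when isMainModule:
  assert floorLog2(1024) == 10
  assert ceilingLog2(1000) == 10 # next power of two is 2^10 = 1024
  assert checkPowerOfTwo(2048, "cellsize") == 2048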
@ -0,0 +1,91 @@
mode = ScriptMode.Verbose


### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
  if not dirExists "build":
    mkDir "build"
  # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
  var extra_params = params
  when compiles(commandLineParams):
    for param in commandLineParams():
      extra_params &= " " & param
  else:
    for i in 2..<paramCount():
      extra_params &= " " & paramStr(i)

  let cmd = "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
  exec(cmd)

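# Illustration (assumed invocation): when the `codex` task below runs with an
# extra command-line flag, e.g. `--hints:off`, it is forwarded via extra_params
# and the proc executes roughly:
#   nim c --out:build/codex -d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE --hints:off ./codex.nim
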
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
  buildBinary name, srcDir, params
  exec "build/" & name

task codex, "build codex binary":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"

task testCodex, "Build & run Codex tests":
  test "testCodex", params = "-d:codex_enable_proof_failures=true"

task testContracts, "Build & run Codex Contract tests":
  test "testContracts"

task testIntegration, "Run integration tests":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
  test "testIntegration"

task build, "build codex binary":
  codexTask()

task test, "Run tests":
  testCodexTask()

task testAll, "Run all tests (except for Taiko L2 tests)":
  testCodexTask()
  testContractsTask()
  testIntegrationTask()

task testTaiko, "Run Taiko L2 tests":
  codexTask()
  test "testTaiko"

import strutils
import os

task coverage, "generates code coverage report":
  var (output, exitCode) = gorgeEx("which lcov")
  if exitCode != 0:
    echo " ************************** ⛔️ ERROR ⛔️ **************************"
    echo " ** ERROR: lcov not found, it must be installed to run code **"
    echo " ** coverage locally **"
    echo " *****************************************************************"
    quit 1

  (output, exitCode) = gorgeEx("gcov --version")
  if output.contains("Apple LLVM"):
    echo " ************************* ⚠️ WARNING ⚠️ *************************"
    echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
    echo " ** emulates an old version of gcov (4.2.0) and therefore **"
    echo " ** coverage results will differ from those on CI (which **"
    echo " ** uses a much newer version of gcov). **"
    echo " *****************************************************************"

  var nimSrcs = " "
  for f in walkDirRec("codex", {pcFile}):
    if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()

  echo "======== Running Tests ======== "
  test "coverage", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
  exec("rm nimcache/coverage/*.c")
  rmDir("coverage"); mkDir("coverage")
  echo " ======== Running LCOV ======== "
  exec("lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info")
  exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
  echo " ======== Generating HTML coverage report ======== "
  exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
  echo " ======== Coverage report Done ======== "

task showCoverage, "open coverage html":
  echo " ======== Opening HTML coverage report in browser... ======== "
  if findExe("open") != "":
    exec("open coverage/report/index.html")

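# Typical flow (assumes lcov and genhtml are installed; task names as defined
# above): the `coverage` task builds the tests with --nimcache:nimcache/coverage,
# captures counters via lcov, filters them down to the codex/*.nim sources, and
# renders HTML into coverage/report; `showCoverage` then opens the report
# (via the macOS `open` command).
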
161 codex.nim
@ -7,18 +7,28 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/chronicles
 import pkg/chronos
+import pkg/questionable
 import pkg/confutils
+import pkg/confutils/defs
+import pkg/confutils/std/net
+import pkg/confutils/toml/defs as confTomlDefs
+import pkg/confutils/toml/std/net as confTomlNet
+import pkg/confutils/toml/std/uri as confTomlUri
+import pkg/toml_serialization
 import pkg/libp2p

 import ./codex/conf
 import ./codex/codex
+import ./codex/logutils
+import ./codex/units
 import ./codex/utils/keyutils
+import ./codex/codextypes

-export codex, conf, libp2p, chronos, chronicles
+export codex, conf, libp2p, chronos, logutils

 when isMainModule:
+  import std/sequtils
   import std/os
   import pkg/confutils/defs
   import ./codex/utils/fileutils

@ -29,72 +39,117 @@ when isMainModule:
   when defined(posix):
     import system/ansi_c

+  type
+    CodexStatus {.pure.} = enum
+      Stopped,
+      Stopping,
+      Running
+
   let config = CodexConf.load(
-    version = codexFullVersion
+    version = codexFullVersion,
+    envVarsPrefix = "codex",
+    secondarySources = proc (config: CodexConf, sources: auto) =
+      if configFile =? config.configFile:
+        sources.addConfigFile(Toml, configFile)
   )
   config.setupLogging()
   config.setupMetrics()

-  case config.cmd:
-  of StartUpCommand.noCommand:
+  if config.nat == ValidIpAddress.init(IPv4_any()):
+    error "`--nat` cannot be set to the any (`0.0.0.0`) address"
+    quit QuitFailure

-    if config.nat == ValidIpAddress.init(IPv4_any()):
-      error "`--nat` cannot be set to the any (`0.0.0.0`) address"
-      quit QuitFailure
+  if config.nat == ValidIpAddress.init("127.0.0.1"):
+    warn "`--nat` is set to loopback, your node wont properly announce over the DHT"

-    if config.nat == ValidIpAddress.init("127.0.0.1"):
-      warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
+  if not(checkAndCreateDataDir((config.dataDir).string)):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    quit QuitFailure

-    if not(checkAndCreateDataDir((config.dataDir).string)):
-      # We are unable to access/create data folder or data folder's
-      # permissions are insecure.
-      quit QuitFailure
+  trace "Data dir initialized", dir = $config.dataDir

-    trace "Data dir initialized", dir = $config.dataDir
+  if not(checkAndCreateDataDir((config.dataDir / "repo"))):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    quit QuitFailure

-    if not(checkAndCreateDataDir((config.dataDir / "repo").string)):
-      # We are unable to access/create data folder or data folder's
-      # permissions are insecure.
-      quit QuitFailure
+  trace "Repo dir initialized", dir = config.dataDir / "repo"

-    trace "Repo dir initialized", dir = config.dataDir / "repo"
+  var
+    state: CodexStatus
+    shutdown: Future[void]

-    let
-      keyPath =
-        if isAbsolute(string config.netPrivKeyFile):
-          string config.netPrivKeyFile
-        else:
-          string config.dataDir / string config.netPrivKeyFile
+  let
+    keyPath =
+      if isAbsolute(config.netPrivKeyFile):
+        config.netPrivKeyFile
+      else:
+        config.dataDir / config.netPrivKeyFile

-      privateKey = setupKey(keyPath).expect("Should setup private key!")
-      server = CodexServer.new(config, privateKey)
+    privateKey = setupKey(keyPath).expect("Should setup private key!")
+    server = try:
+      CodexServer.new(config, privateKey)
+    except Exception as exc:
+      error "Failed to start Codex", msg = exc.msg
+      quit QuitFailure

-    ## Ctrl+C handling
-    proc controlCHandler() {.noconv.} =
-      when defined(windows):
-        # workaround for https://github.com/nim-lang/Nim/issues/4057
-        try:
-          setupForeignThreadGc()
-        except Exception as exc: raiseAssert exc.msg # shouldn't happen
-      notice "Shutting down after having received SIGINT"
-      waitFor server.stop()
+  ## Ctrl+C handling
+  proc doShutdown() =
+    shutdown = server.stop()
+    state = CodexStatus.Stopping

-    try:
-      setControlCHook(controlCHandler)
-    except Exception as exc: # TODO Exception
-      warn "Cannot set ctrl-c handler", msg = exc.msg
+    notice "Stopping Codex"

-    # equivalent SIGTERM handler
-    when defined(posix):
-      proc SIGTERMHandler(signal: cint) {.noconv.} =
-        notice "Shutting down after having received SIGTERM"
-        waitFor server.stop()
-        notice "Stopped Codex"
+  proc controlCHandler() {.noconv.} =
+    when defined(windows):
+      # workaround for https://github.com/nim-lang/Nim/issues/4057
+      try:
+        setupForeignThreadGc()
+      except Exception as exc: raiseAssert exc.msg # shouldn't happen
+    notice "Shutting down after having received SIGINT"
+
+    doShutdown()

-      c_signal(ansi_c.SIGTERM, SIGTERMHandler)
+  try:
+    setControlCHook(controlCHandler)
+  except Exception as exc: # TODO Exception
+    warn "Cannot set ctrl-c handler", msg = exc.msg

+  # equivalent SIGTERM handler
+  when defined(posix):
+    proc SIGTERMHandler(signal: cint) {.noconv.} =
+      notice "Shutting down after having received SIGTERM"
+
+      doShutdown()
+
+    c_signal(ansi_c.SIGTERM, SIGTERMHandler)
+
+  try:
     waitFor server.start()
-    notice "Exited codex"
+  except CatchableError as error:
+    error "Codex failed to start", error = error.msg
+    # XXX ideally we'd like to issue a stop instead of quitting cold turkey,
+    # but this would mean we'd have to fix the implementation of all
+    # services so they won't crash if we attempt to stop them before they
+    # had a chance to start (currently you'll get a SISGSEV if you try to).
+    quit QuitFailure

-  of StartUpCommand.initNode:
-    discard
+  state = CodexStatus.Running
+  while state == CodexStatus.Running:
+    try:
+      # poll chronos
+      chronos.poll()
+    except Exception as exc:
+      error "Unhandled exception in async proc, aborting", msg = exc.msg
+      quit QuitFailure
+
+  try:
+    # signal handlers guarantee that the shutdown Future will
+    # be assigned before state switches to Stopping
+    waitFor shutdown
+  except CatchableError as error:
+    error "Codex didn't shutdown correctly", error = error.msg
+    quit QuitFailure
+
+  notice "Exited codex"
73 codex.nimble
@ -1,78 +1,9 @@
-mode = ScriptMode.Verbose
-
 version = "0.1.0"
 author = "Codex Team"
 description = "p2p data durability engine"
 license = "MIT"
 binDir = "build"
 srcDir = "."
+installFiles = @["build.nims"]

-requires "nim >= 1.2.0",
-  "asynctest >= 0.3.2 & < 0.4.0",
-  "bearssl >= 0.1.4",
-  "chronicles >= 0.7.2",
-  "chronos >= 2.5.2",
-  "confutils",
-  "ethers >= 0.2.0 & < 0.3.0",
-  "libbacktrace",
-  "libp2p",
-  "metrics",
-  "nimcrypto >= 0.4.1",
-  "nitro >= 0.5.1 & < 0.6.0",
-  "presto",
-  "protobuf_serialization >= 0.2.0 & < 0.3.0",
-  "questionable >= 0.10.6 & < 0.11.0",
-  "secp256k1",
-  "stew",
-  "upraises >= 0.1.0 & < 0.2.0",
-  "lrucache",
-  "leopard >= 0.1.0 & < 0.2.0",
-  "blscurve",
-  "libp2pdht",
-  "eth"
+include "build.nims"

-when declared(namedBin):
-  namedBin = {
-    "codex/codex": "codex"
-  }.toTable()
-
-### Helper functions
-proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
-  if not dirExists "build":
-    mkDir "build"
-  # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
-  var extra_params = params
-  when compiles(commandLineParams):
-    for param in commandLineParams:
-      extra_params &= " " & param
-  else:
-    for i in 2..<paramCount():
-      extra_params &= " " & paramStr(i)
-
-  exec "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
-
-proc test(name: string, srcDir = "tests/", lang = "c") =
-  buildBinary name, srcDir
-  exec "build/" & name
-
-task codex, "build codex binary":
-  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
-
-task testCodex, "Build & run Codex tests":
-  test "testCodex"
-
-task testContracts, "Build & run Codex Contract tests":
-  test "testContracts"
-
-task testIntegration, "Run integration tests":
-  codexTask()
-  test "testIntegration"
-
-task test, "Run tests":
-  testCodexTask()
-
-task testAll, "Run all tests":
-  testCodexTask()
-  testContractsTask()
-  testIntegrationTask()
@ -10,26 +10,28 @@
 import std/sequtils

 import pkg/chronos
-import pkg/chronicles
-import pkg/libp2p
+import pkg/libp2p/cid
+import pkg/libp2p/multicodec
 import pkg/metrics
+import pkg/questionable
 import pkg/questionable/results

-import ../protobuf/presence
+import ./pendingblocks

+import ../protobuf/presence
 import ../network
 import ../peers

 import ../../utils
 import ../../discovery
 import ../../stores/blockstore
-import ./pendingblocks
+import ../../logutils
+import ../../manifest

 logScope:
   topics = "codex discoveryengine"

-declareGauge(codex_inflight_discovery, "inflight discovery requests")
+declareGauge(codexInflightDiscovery, "inflight discovery requests")

 const
   DefaultConcurrentDiscRequests = 10

@ -37,7 +39,7 @@ const
   DefaultDiscoveryTimeout = 1.minutes
   DefaultMinPeersPerBlock = 3
   DefaultDiscoveryLoopSleep = 3.seconds
-  DefaultAdvertiseLoopSleep = 3.seconds
+  DefaultAdvertiseLoopSleep = 30.minutes

 type
   DiscoveryEngine* = ref object of RootObj

@ -60,41 +62,55 @@ type
     advertiseLoopSleep: Duration # Advertise loop sleep
     inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
     inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
+    advertiseType*: BlockType # Advertice blocks, manifests or both

 proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
   while b.discEngineRunning:
-    for cid in toSeq(b.pendingBlocks.wantList):
+    for cid in toSeq(b.pendingBlocks.wantListBlockCids):
       try:
         await b.discoveryQueue.put(cid)
+      except CancelledError:
+        trace "Discovery loop cancelled"
+        return
       except CatchableError as exc:
-        trace "Exception in discovery loop", exc = exc.msg
+        warn "Exception in discovery loop", exc = exc.msg

     logScope:
       sleep = b.discoveryLoopSleep
       wanted = b.pendingBlocks.len

-    trace "About to sleep discovery loop"
     await sleepAsync(b.discoveryLoopSleep)

-proc advertiseQueueLoop*(b: DiscoveryEngine) {.async.} =
-  proc onBlock(cid: Cid) {.async.} =
-    try:
-      trace "Listed block", cid
-      await b.advertiseQueue.put(cid)
-      await sleepAsync(50.millis) # TODO: temp workaround because we're announcing all CIDs
-    except CancelledError as exc:
-      trace "Cancelling block listing"
-      raise exc
-    except CatchableError as exc:
-      trace "Exception listing blocks", exc = exc.msg
+proc advertiseBlock(b: DiscoveryEngine, cid: Cid) {.async.} =
+  without isM =? cid.isManifest, err:
+    warn "Unable to determine if cid is manifest"
+    return

+  if isM:
+    without blk =? await b.localStore.getBlock(cid), err:
+      error "Error retrieving manifest block", cid, err = err.msg
+      return
+
+    without manifest =? Manifest.decode(blk), err:
+      error "Unable to decode as manifest", err = err.msg
+      return
+
+    # announce manifest cid and tree cid
+    await b.advertiseQueue.put(cid)
+    await b.advertiseQueue.put(manifest.treeCid)
+
+proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
   while b.discEngineRunning:
-    discard await b.localStore.listBlocks(onBlock)
+    if cids =? await b.localStore.listBlocks(blockType = b.advertiseType):
+      trace "Begin iterating blocks..."
+      for c in cids:
+        if cid =? await c:
+          await b.advertiseBlock(cid)
+      trace "Iterating blocks finished."

-    trace "About to sleep advertise loop", sleep = b.advertiseLoopSleep
     await sleepAsync(b.advertiseLoopSleep)

-  trace "Exiting advertise task loop"
+  info "Exiting advertise task loop"

 proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
   ## Run advertise tasks

@ -106,7 +122,6 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
       cid = await b.advertiseQueue.get()

       if cid in b.inFlightAdvReqs:
-        trace "Advertise request already in progress", cid
         continue

       try:

@ -114,18 +129,19 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
           request = b.discovery.provide(cid)

         b.inFlightAdvReqs[cid] = request
-        codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
-        trace "Advertising block", cid, inflight = b.inFlightAdvReqs.len
+        codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
         await request

       finally:
         b.inFlightAdvReqs.del(cid)
-        codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
-        trace "Advertised block", cid, inflight = b.inFlightAdvReqs.len
+        codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
+    except CancelledError:
+      trace "Advertise task cancelled"
+      return
     except CatchableError as exc:
-      trace "Exception in advertise task runner", exc = exc.msg
+      warn "Exception in advertise task runner", exc = exc.msg

-  trace "Exiting advertise task runner"
+  info "Exiting advertise task runner"

 proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
   ## Run discovery tasks

@ -143,9 +159,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
       let
         haves = b.peers.peersHave(cid)

-      trace "Current number of peers for block", cid, count = haves.len
       if haves.len < b.minPeersPerBlock:
-        trace "Discovering block", cid
         try:
           let
             request = b.discovery

@ -153,11 +167,10 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
               .wait(DefaultDiscoveryTimeout)

           b.inFlightDiscReqs[cid] = request
-          codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
+          codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
           let
             peers = await request

-          trace "Discovered peers", peers = peers.len
           let
             dialed = await allFinished(
               peers.mapIt( b.network.dialPeer(it.data) ))

@ -168,29 +181,30 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =

         finally:
           b.inFlightDiscReqs.del(cid)
-          codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
+          codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
+    except CancelledError:
+      trace "Discovery task cancelled"
+      return
     except CatchableError as exc:
-      trace "Exception in discovery task runner", exc = exc.msg
+      warn "Exception in discovery task runner", exc = exc.msg

-  trace "Exiting discovery task runner"
+  info "Exiting discovery task runner"

 proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
   for cid in cids:
     if cid notin b.discoveryQueue:
       try:
-        trace "Queueing find block", cid, queue = b.discoveryQueue.len
         b.discoveryQueue.putNoWait(cid)
       except CatchableError as exc:
-        trace "Exception queueing discovery request", exc = exc.msg
+        warn "Exception queueing discovery request", exc = exc.msg

 proc queueProvideBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
   for cid in cids:
     if cid notin b.advertiseQueue:
       try:
-        trace "Queueing provide block", cid, queue = b.discoveryQueue.len
         b.advertiseQueue.putNoWait(cid)
       except CatchableError as exc:
-        trace "Exception queueing discovery request", exc = exc.msg
+        warn "Exception queueing discovery request", exc = exc.msg

 proc start*(b: DiscoveryEngine) {.async.} =
   ## Start the discengine task

@ -222,16 +236,16 @@ proc stop*(b: DiscoveryEngine) {.async.} =
     return

   b.discEngineRunning = false
-  for t in b.advertiseTasks:
-    if not t.finished:
+  for task in b.advertiseTasks:
+    if not task.finished:
       trace "Awaiting advertise task to stop"
-      await t.cancelAndWait()
+      await task.cancelAndWait()
       trace "Advertise task stopped"

-  for t in b.discoveryTasks:
-    if not t.finished:
+  for task in b.discoveryTasks:
+    if not task.finished:
       trace "Awaiting discovery task to stop"
-      await t.cancelAndWait()
+      await task.cancelAndWait()
       trace "Discovery task stopped"

   if not b.advertiseLoop.isNil and not b.advertiseLoop.finished:

@ -247,18 +261,22 @@ proc stop*(b: DiscoveryEngine) {.async.} =
   trace "Discovery engine stopped"

 proc new*(
   T: type DiscoveryEngine,
   localStore: BlockStore,
   peers: PeerCtxStore,
   network: BlockExcNetwork,
   discovery: Discovery,
   pendingBlocks: PendingBlocksManager,
   concurrentAdvReqs = DefaultConcurrentAdvertRequests,
   concurrentDiscReqs = DefaultConcurrentDiscRequests,
   discoveryLoopSleep = DefaultDiscoveryLoopSleep,
   advertiseLoopSleep = DefaultAdvertiseLoopSleep,
-  minPeersPerBlock = DefaultMinPeersPerBlock,): DiscoveryEngine =
-  T(
+  minPeersPerBlock = DefaultMinPeersPerBlock,
+  advertiseType = BlockType.Manifest
+): DiscoveryEngine =
+  ## Create a discovery engine instance for advertising services
+  ##
+  DiscoveryEngine(
     localStore: localStore,
     peers: peers,
     network: network,

@ -272,4 +290,5 @@ proc new*(
     inFlightAdvReqs: initTable[Cid, Future[void]](),
     discoveryLoopSleep: discoveryLoopSleep,
     advertiseLoopSleep: advertiseLoopSleep,
-    minPeersPerBlock: minPeersPerBlock)
+    minPeersPerBlock: minPeersPerBlock,
+    advertiseType: advertiseType)

@ -11,15 +11,20 @@ import std/sequtils
|
||||||
import std/sets
|
import std/sets
|
||||||
import std/options
|
import std/options
|
||||||
import std/algorithm
|
import std/algorithm
|
||||||
|
import std/sugar
|
||||||
|
|
||||||
import pkg/chronos
|
import pkg/chronos
|
||||||
import pkg/chronicles
|
import pkg/libp2p/[cid, switch, multihash, multicodec]
|
||||||
import pkg/libp2p
|
import pkg/metrics
|
||||||
import pkg/stint
|
import pkg/stint
|
||||||
|
import pkg/questionable
|
||||||
|
|
||||||
import ../../stores/blockstore
|
import ../../stores/blockstore
|
||||||
import ../../blocktype as bt
|
import ../../blocktype
|
||||||
import ../../utils
|
import ../../utils
|
||||||
|
import ../../merkletree
|
||||||
|
import ../../logutils
|
||||||
|
import ../../manifest
|
||||||
|
|
||||||
import ../protobuf/blockexc
|
import ../protobuf/blockexc
|
||||||
import ../protobuf/presence
|
import ../protobuf/presence
|
||||||
|
@ -36,16 +41,23 @@ export peers, pendingblocks, payments, discovery
|
||||||
logScope:
|
logScope:
|
||||||
topics = "codex blockexcengine"
|
topics = "codex blockexcengine"
|
||||||
|
|
||||||
|
declareCounter(codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent")
|
||||||
|
declareCounter(codex_block_exchange_want_have_lists_received, "codex blockexchange wantHave lists received")
|
||||||
|
declareCounter(codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent")
|
||||||
|
declareCounter(codex_block_exchange_want_block_lists_received, "codex blockexchange wantBlock lists received")
|
||||||
|
declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sent")
|
||||||
|
declareCounter(codex_block_exchange_blocks_received, "codex blockexchange blocks received")
|
||||||
|
|
||||||
const
|
const
|
||||||
DefaultMaxPeersPerRequest* = 10
|
DefaultMaxPeersPerRequest* = 10
|
||||||
DefaultTaskQueueSize = 100
|
DefaultTaskQueueSize = 100
|
||||||
DefaultConcurrentTasks = 10
|
DefaultConcurrentTasks = 10
|
||||||
DefaultMaxRetries = 3
|
# DefaultMaxRetries = 3
|
||||||
DefaultConcurrentDiscRequests = 10
|
# DefaultConcurrentDiscRequests = 10
|
||||||
DefaultConcurrentAdvertRequests = 10
|
# DefaultConcurrentAdvertRequests = 10
|
||||||
DefaultDiscoveryTimeout = 1.minutes
|
# DefaultDiscoveryTimeout = 1.minutes
|
||||||
DefaultMaxQueriedBlocksCache = 1000
|
# DefaultMaxQueriedBlocksCache = 1000
|
||||||
DefaultMinPeersPerBlock = 3
|
# DefaultMinPeersPerBlock = 3
|
||||||
|
|
||||||
type
|
type
|
||||||
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
|
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
|
||||||
|
@ -63,18 +75,13 @@ type
|
||||||
peersPerRequest: int # Max number of peers to request from
|
peersPerRequest: int # Max number of peers to request from
|
||||||
wallet*: WalletRef # Nitro wallet for micropayments
|
wallet*: WalletRef # Nitro wallet for micropayments
|
||||||
pricing*: ?Pricing # Optional bandwidth pricing
|
pricing*: ?Pricing # Optional bandwidth pricing
|
||||||
|
blockFetchTimeout*: Duration # Timeout for fetching blocks over the network
|
||||||
discovery*: DiscoveryEngine
|
discovery*: DiscoveryEngine
|
||||||
|
|
||||||
Pricing* = object
|
Pricing* = object
|
||||||
address*: EthAddress
|
address*: EthAddress
|
||||||
price*: UInt256
|
price*: UInt256
|
||||||
|
|
||||||
proc contains*(a: AsyncHeapQueue[Entry], b: Cid): bool =
|
|
||||||
## Convenience method to check for entry prepense
|
|
||||||
##
|
|
||||||
|
|
||||||
a.anyIt( it.cid == b )
|
|
||||||
|
|
||||||
# attach task scheduler to engine
|
# attach task scheduler to engine
|
||||||
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
|
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
|
||||||
b.taskQueue.pushOrUpdateNoWait(task).isOk()
|
b.taskQueue.pushOrUpdateNoWait(task).isOk()
|
||||||
|
@ -108,100 +115,106 @@ proc stop*(b: BlockExcEngine) {.async.} =
|
||||||
return
|
return
|
||||||
|
|
||||||
b.blockexcRunning = false
|
b.blockexcRunning = false
|
||||||
for t in b.blockexcTasks:
|
for task in b.blockexcTasks:
|
||||||
if not t.finished:
|
if not task.finished:
|
||||||
trace "Awaiting task to stop"
|
trace "Awaiting task to stop"
|
||||||
await t.cancelAndWait()
|
await task.cancelAndWait()
|
||||||
trace "Task stopped"
|
trace "Task stopped"
|
||||||
|
|
||||||
trace "NetworkStore stopped"
|
trace "NetworkStore stopped"
|
||||||
|
|
||||||
proc requestBlock*(
|
proc sendWantHave(
|
||||||
b: BlockExcEngine,
|
b: BlockExcEngine,
|
||||||
cid: Cid,
|
address: BlockAddress, # pluralize this entire call chain, please
|
||||||
timeout = DefaultBlockTimeout): Future[bt.Block] {.async.} =
|
excluded: seq[BlockExcPeerCtx],
|
||||||
## Request a block from remotes
|
peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
|
||||||
##
|
trace "Sending wantHave request to peers", address
|
||||||
|
for p in peers:
|
||||||
|
if p notin excluded:
|
||||||
|
if address notin p.peerHave:
|
||||||
|
await b.network.request.sendWantList(
|
||||||
|
p.id,
|
||||||
|
@[address],
|
||||||
|
wantType = WantType.WantHave) # we only want to know if the peer has the block
|
||||||
|
|
||||||
trace "Requesting block", cid, peers = b.peers.len
|
proc sendWantBlock(
|
||||||
|
b: BlockExcEngine,
|
||||||
if b.pendingBlocks.isInFlight(cid):
|
address: BlockAddress, # pluralize this entire call chain, please
|
||||||
trace "Request handle already pending", cid
|
blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
|
||||||
return await b.pendingBlocks.getWantHandle(cid, timeout)
|
trace "Sending wantBlock request to", peer = blockPeer.id, address
|
||||||
|
|
||||||
let
|
|
||||||
blk = b.pendingBlocks.getWantHandle(cid, timeout)
|
|
||||||
|
|
||||||
var
|
|
||||||
peers = b.peers.selectCheapest(cid)
|
|
||||||
|
|
||||||
if peers.len <= 0:
|
|
||||||
trace "No cheapest peers, selecting first in list", cid
|
|
||||||
peers = toSeq(b.peers) # Get any peer
|
|
||||||
if peers.len <= 0:
|
|
||||||
trace "No peers to request blocks from", cid
|
|
||||||
b.discovery.queueFindBlocksReq(@[cid])
|
|
||||||
return await blk
|
|
||||||
|
|
||||||
let
|
|
||||||
blockPeer = peers[0] # get cheapest
|
|
||||||
|
|
||||||
proc blockHandleMonitor() {.async.} =
|
|
||||||
try:
|
|
||||||
trace "Monigoring block handle", cid
|
|
||||||
b.pendingBlocks.setInFlight(cid, true)
|
|
||||||
discard await blk
|
|
||||||
trace "Block handle success", cid
|
|
||||||
except CatchableError as exc:
|
|
||||||
trace "Error block handle, disconnecting peer", cid, exc = exc.msg
|
|
||||||
|
|
||||||
# TODO: really, this is just a quick and dirty way of
|
|
||||||
# preventing hitting the same "bad" peer every time, however,
|
|
||||||
# we might as well discover this on or next iteration, so
|
|
||||||
# it doesn't mean that we're never talking to this peer again.
|
|
||||||
# TODO: we need a lot more work around peer selection and
|
|
||||||
# prioritization
|
|
||||||
|
|
||||||
# drop unresponsive peer
|
|
||||||
await b.network.switch.disconnect(blockPeer.id)
|
|
||||||
|
|
||||||
trace "Sending block request to peer", peer = blockPeer.id, cid
|
|
||||||
|
|
||||||
# monitor block handle
|
|
||||||
asyncSpawn blockHandleMonitor()
|
|
||||||
|
|
||||||
# request block
|
|
||||||
await b.network.request.sendWantList(
|
await b.network.request.sendWantList(
|
||||||
blockPeer.id,
|
blockPeer.id,
|
||||||
@[cid],
|
@[address],
|
||||||
wantType = WantType.WantBlock) # we want this remote to send us a block
|
wantType = WantType.WantBlock) # we want this remote to send us a block
|
||||||
|
|
||||||
if (peers.len - 1) == 0:
|
proc monitorBlockHandle(
|
||||||
trace "No peers to send want list to", cid
|
b: BlockExcEngine,
|
||||||
b.discovery.queueFindBlocksReq(@[cid])
|
handle: Future[Block],
|
||||||
return await blk # no peers to send wants to
|
address: BlockAddress,
|
||||||
|
peerId: PeerId) {.async.} =
|
||||||
|
|
||||||
# filter out the peer we've already requested from
|
try:
|
||||||
let remaining = peers[1..min(peers.high, b.peersPerRequest)]
|
discard await handle
|
||||||
trace "Sending want list to remaining peers", count = remaining.len
|
except CancelledError as exc:
|
||||||
for p in remaining:
|
trace "Block handle cancelled", address, peerId
|
||||||
if cid notin p.peerHave:
|
except CatchableError as exc:
|
||||||
# just send wants
|
warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId
|
||||||
await b.network.request.sendWantList(
|
|
||||||
p.id,
|
|
||||||
@[cid],
|
|
||||||
wantType = WantType.WantHave) # we only want to know if the peer has the block
|
|
||||||
|
|
||||||
return await blk
|
# TODO: really, this is just a quick and dirty way of
|
||||||
|
# preventing hitting the same "bad" peer every time, however,
|
||||||
|
# we might as well discover this on or next iteration, so
|
||||||
|
# it doesn't mean that we're never talking to this peer again.
|
||||||
|
# TODO: we need a lot more work around peer selection and
|
||||||
|
# prioritization
|
||||||
|
|
||||||
|
# drop unresponsive peer
|
||||||
|
await b.network.switch.disconnect(peerId)
|
||||||
|
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
|
||||||
|
|
||||||
|
proc requestBlock*(
|
||||||
|
b: BlockExcEngine,
|
||||||
|
address: BlockAddress,
|
||||||
|
): Future[?!Block] {.async.} =
|
||||||
|
let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout)
|
||||||
|
|
||||||
|
if not b.pendingBlocks.isInFlight(address):
|
||||||
|
let peers = b.peers.selectCheapest(address)
|
||||||
|
if peers.len == 0:
|
||||||
|
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
|
||||||
|
|
||||||
|
let maybePeer =
|
||||||
|
if peers.len > 0:
|
||||||
|
peers[hash(address) mod peers.len].some
|
||||||
|
elif b.peers.len > 0:
|
||||||
|
toSeq(b.peers)[hash(address) mod b.peers.len].some
|
||||||
|
else:
|
||||||
|
BlockExcPeerCtx.none
|
||||||
|
|
||||||
|
if peer =? maybePeer:
|
||||||
|
asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id)
|
||||||
|
b.pendingBlocks.setInFlight(address)
|
||||||
|
await b.sendWantBlock(address, peer)
|
||||||
|
codex_block_exchange_want_block_lists_sent.inc()
|
||||||
|
await b.sendWantHave(address, @[peer], toSeq(b.peers))
|
||||||
|
codex_block_exchange_want_have_lists_sent.inc()
|
||||||
|
|
||||||
|
# Don't let timeouts bubble up. We can't be too broad here or we break
|
||||||
|
# cancellations.
|
||||||
|
try:
|
||||||
|
success await blockFuture
|
||||||
|
except AsyncTimeoutError as err:
|
||||||
|
failure err
|
||||||
|
|
||||||
|
proc requestBlock*(
|
||||||
|
b: BlockExcEngine,
|
||||||
|
cid: Cid
|
||||||
|
): Future[?!Block] =
|
||||||
|
b.requestBlock(BlockAddress.init(cid))
|
||||||
|
|
||||||
proc blockPresenceHandler*(
|
proc blockPresenceHandler*(
|
||||||
b: BlockExcEngine,
|
b: BlockExcEngine,
|
||||||
peer: PeerId,
|
peer: PeerId,
|
||||||
blocks: seq[BlockPresence]) {.async.} =
|
blocks: seq[BlockPresence]) {.async.} =
|
||||||
## Handle block presence
|
|
||||||
##
|
|
||||||
|
|
||||||
trace "Received presence update for peer", peer, blocks = blocks.len
|
|
||||||
let
|
let
|
||||||
peerCtx = b.peers.get(peer)
|
peerCtx = b.peers.get(peer)
|
||||||
wantList = toSeq(b.pendingBlocks.wantList)
|
wantList = toSeq(b.pendingBlocks.wantList)
|
||||||
|
@ -211,12 +224,6 @@ proc blockPresenceHandler*(
|
||||||
|
|
||||||
for blk in blocks:
|
for blk in blocks:
|
||||||
if presence =? Presence.init(blk):
|
if presence =? Presence.init(blk):
|
||||||
logScope:
|
|
||||||
cid = presence.cid
|
|
||||||
have = presence.have
|
|
||||||
price = presence.price
|
|
||||||
|
|
||||||
trace "Updating precense"
|
|
||||||
peerCtx.setPresence(presence)
|
peerCtx.setPresence(presence)
|
||||||
|
|
||||||
let
|
let
|
||||||
|
@ -226,166 +233,237 @@ proc blockPresenceHandler*(
|
||||||
)
|
)
|
||||||
|
|
||||||
if dontWantCids.len > 0:
|
if dontWantCids.len > 0:
|
||||||
trace "Cleaning peer haves", peer, count = dontWantCids.len
|
|
||||||
peerCtx.cleanPresence(dontWantCids)
|
peerCtx.cleanPresence(dontWantCids)
|
||||||
|
|
||||||
trace "Peer want/have", items = peerHave.len, wantList = wantList.len
|
|
||||||
let
|
let
|
||||||
wantCids = wantList.filterIt(
|
wantCids = wantList.filterIt(
|
||||||
it in peerHave
|
it in peerHave
|
||||||
)
|
)
|
||||||
|
|
||||||
if wantCids.len > 0:
|
if wantCids.len > 0:
|
||||||
trace "Getting blocks based on updated precense", peer, count = wantCids.len
|
trace "Peer has blocks in our wantList", peer, wantCount = wantCids.len
|
||||||
discard await allFinished(
|
discard await allFinished(
|
||||||
wantCids.mapIt(b.requestBlock(it)))
|
wantCids.mapIt(b.sendWantBlock(it, peerCtx)))
|
||||||
trace "Requested blocks based on updated precense", peer, count = wantCids.len
|
|
||||||
|
|
||||||
# if none of the connected peers report our wants in their have list,
|
# if none of the connected peers report our wants in their have list,
|
||||||
# fire up discovery
|
# fire up discovery
|
||||||
b.discovery.queueFindBlocksReq(
|
b.discovery.queueFindBlocksReq(
|
||||||
toSeq(b.pendingBlocks.wantList)
|
toSeq(b.pendingBlocks.wantListCids)
|
||||||
.filter do(cid: Cid) -> bool:
|
.filter do(cid: Cid) -> bool:
|
||||||
not b.peers.anyIt( cid in it.peerHave ))
|
not b.peers.anyIt( cid in it.peerHaveCids ))
|
||||||
|
|
||||||
proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
|
|
||||||
trace "Schedule a task for new blocks", items = blocks.len
|
|
||||||
|
|
||||||
|
proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||||
let
|
let
|
||||||
cids = blocks.mapIt( it.cid )
|
cids = blocksDelivery.mapIt( it.blk.cid )
|
||||||
|
|
||||||
# schedule any new peers to provide blocks to
|
# schedule any new peers to provide blocks to
|
||||||
for p in b.peers:
|
for p in b.peers:
|
||||||
for c in cids: # for each cid
|
for c in cids: # for each cid
|
||||||
# schedule a peer if it wants at least one cid
|
# schedule a peer if it wants at least one cid
|
||||||
# and we have it in our local store
|
# and we have it in our local store
|
||||||
if c in p.peerWants:
|
if c in p.peerWantsCids:
|
||||||
if await (c in b.localStore):
|
if await (c in b.localStore):
|
||||||
if b.scheduleTask(p):
|
if b.scheduleTask(p):
|
||||||
trace "Task scheduled for peer", peer = p.id
|
trace "Task scheduled for peer", peer = p.id
|
||||||
else:
|
else:
|
||||||
trace "Unable to schedule task for peer", peer = p.id
|
warn "Unable to schedule task for peer", peer = p.id
|
||||||
|
|
||||||
break # do next peer
|
break # do next peer
|
||||||
|
|
||||||
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
|
proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
|
||||||
## Resolve pending blocks from the pending blocks manager
|
## Tells neighboring peers that we're no longer interested in a block.
|
||||||
## and schedule any new task to be ran
|
trace "Sending block request cancellations to peers", addrs = addrs.len
|
||||||
##
|
|
||||||
|
|
||||||
trace "Resolving blocks", blocks = blocks.len
|
let failed = (await allFinished(
|
||||||
|
b.peers.mapIt(
|
||||||
|
b.network.request.sendWantCancellations(
|
||||||
|
peer = it.id,
|
||||||
|
addresses = addrs))))
|
||||||
|
.filterIt(it.failed)
|
||||||
|
|
||||||
b.pendingBlocks.resolve(blocks)
|
if failed.len > 0:
|
||||||
await b.scheduleTasks(blocks)
|
warn "Failed to send block request cancellations to peers", peers = failed.len
|
||||||
b.discovery.queueProvideBlocksReq(blocks.mapIt( it.cid ))
|
|
||||||
|
proc getAnnouceCids(blocksDelivery: seq[BlockDelivery]): seq[Cid] =
|
||||||
|
var cids = initHashSet[Cid]()
|
||||||
|
for bd in blocksDelivery:
|
||||||
|
if bd.address.leaf:
|
||||||
|
cids.incl(bd.address.treeCid)
|
||||||
|
else:
|
||||||
|
without isM =? bd.address.cid.isManifest, err:
|
||||||
|
warn "Unable to determine if cid is manifest"
|
||||||
|
continue
|
||||||
|
if isM:
|
||||||
|
cids.incl(bd.address.cid)
|
||||||
|
return cids.toSeq
|
||||||
|
|
||||||
|
proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||||
|
b.pendingBlocks.resolve(blocksDelivery)
|
||||||
|
await b.scheduleTasks(blocksDelivery)
|
||||||
|
let announceCids = getAnnouceCids(blocksDelivery)
|
||||||
|
await b.cancelBlocks(blocksDelivery.mapIt(it.address))
|
||||||
|
|
||||||
|
b.discovery.queueProvideBlocksReq(announceCids)
|
||||||
|
|
||||||
|
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
|
||||||
|
await b.resolveBlocks(
|
||||||
|
blocks.mapIt(
|
||||||
|
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)
|
||||||
|
)))
|
||||||
|
|
||||||
proc payForBlocks(engine: BlockExcEngine,
|
proc payForBlocks(engine: BlockExcEngine,
|
||||||
peer: BlockExcPeerCtx,
|
peer: BlockExcPeerCtx,
|
||||||
blocks: seq[bt.Block]) {.async.} =
|
blocksDelivery: seq[BlockDelivery]) {.async.} =
|
||||||
trace "Paying for blocks", blocks = blocks.len
|
|
||||||
|
|
||||||
let
|
let
|
||||||
sendPayment = engine.network.request.sendPayment
|
sendPayment = engine.network.request.sendPayment
|
||||||
price = peer.price(blocks.mapIt(it.cid))
|
price = peer.price(blocksDelivery.mapIt(it.address))
|
||||||
|
|
||||||
if payment =? engine.wallet.pay(peer, price):
|
if payment =? engine.wallet.pay(peer, price):
|
||||||
trace "Sending payment for blocks", price
|
trace "Sending payment for blocks", price, len = blocksDelivery.len
|
||||||
await sendPayment(peer.id, payment)
|
await sendPayment(peer.id, payment)
|
||||||
|
|
||||||
proc blocksHandler*(
|
proc validateBlockDelivery(
|
||||||
|
b: BlockExcEngine,
|
||||||
|
bd: BlockDelivery): ?!void =
|
||||||
|
if bd.address notin b.pendingBlocks:
|
||||||
|
return failure("Received block is not currently a pending block")
|
||||||
|
|
||||||
|
if bd.address.leaf:
|
||||||
|
without proof =? bd.proof:
|
||||||
|
return failure("Missing proof")
|
||||||
|
|
||||||
|
if proof.index != bd.address.index:
|
||||||
|
return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index)
|
||||||
|
|
||||||
|
without leaf =? bd.blk.cid.mhash.mapFailure, err:
|
||||||
|
return failure("Unable to get mhash from cid for block, nested err: " & err.msg)
|
||||||
|
|
||||||
|
without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
|
||||||
|
return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
|
||||||
|
|
||||||
|
if err =? proof.verify(leaf, treeRoot).errorOption:
|
||||||
|
return failure("Unable to verify proof for block, nested err: " & err.msg)
|
||||||
|
|
||||||
|
else: # not leaf
|
||||||
|
if bd.address.cid != bd.blk.cid:
|
||||||
|
return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)
|
||||||
|
|
||||||
|
return success()
|
||||||
|
|
||||||
|
proc blocksDeliveryHandler*(
  b: BlockExcEngine,
  peer: PeerId,
  blocksDelivery: seq[BlockDelivery]) {.async.} =
  trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt($it.address)).join(",")

  var validatedBlocksDelivery: seq[BlockDelivery]
  for bd in blocksDelivery:
    logScope:
      peer = peer
      address = bd.address

    if err =? b.validateBlockDelivery(bd).errorOption:
      warn "Block validation failed", msg = err.msg
      continue

    if err =? (await b.localStore.putBlock(bd.blk)).errorOption:
      error "Unable to store block", err = err.msg
      continue

    if bd.address.leaf:
      without proof =? bd.proof:
        error "Proof expected for a leaf block delivery"
        continue
      if err =? (await b.localStore.putCidAndProof(
          bd.address.treeCid,
          bd.address.index,
          bd.blk.cid,
          proof)).errorOption:
        error "Unable to store proof and cid for a block"
        continue

    validatedBlocksDelivery.add(bd)

  await b.resolveBlocks(validatedBlocksDelivery)
  codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)

  let
    peerCtx = b.peers.get(peer)

  if peerCtx != nil:
    await b.payForBlocks(peerCtx, blocksDelivery)
    ## shouldn't we remove them from the want-list instead of this:
    peerCtx.cleanPresence(blocksDelivery.mapIt(it.address))

proc wantListHandler*(
  b: BlockExcEngine,
  peer: PeerId,
  wantList: WantList) {.async.} =
  let
    peerCtx = b.peers.get(peer)

  if isNil(peerCtx):
    return

  var
    presence: seq[BlockPresence]

  for e in wantList.entries:
    let
      idx = peerCtx.peerWants.findIt(it.address == e.address)

    logScope:
      peer = peerCtx.id
      address = e.address
      wantType = $e.wantType

    if idx < 0: # new entry
      let
        have = await e.address in b.localStore
        price = @(
          b.pricing.get(Pricing(price: 0.u256))
          .price.toBytesBE)

      if e.wantType == WantType.WantHave:
        codex_block_exchange_want_have_lists_received.inc()

      if not have and e.sendDontHave:
        presence.add(
          BlockPresence(
            address: e.address,
            `type`: BlockPresenceType.DontHave,
            price: price))
      elif have and e.wantType == WantType.WantHave:
        presence.add(
          BlockPresence(
            address: e.address,
            `type`: BlockPresenceType.Have,
            price: price))
      elif e.wantType == WantType.WantBlock:
        peerCtx.peerWants.add(e)
        codex_block_exchange_want_block_lists_received.inc()
    else:
      # peer doesn't want this block anymore
      if e.cancel:
        peerCtx.peerWants.del(idx)
      else:
        # peer might want to ask for the same cid with
        # different want params
        peerCtx.peerWants[idx] = e # update entry

  if presence.len > 0:
    trace "Sending presence to remote", items = presence.mapIt($it).join(",")
    await b.network.request.sendPresence(peer, presence)

  if not b.scheduleTask(peerCtx):
    warn "Unable to schedule task for peer", peer

proc accountHandler*(
  engine: BlockExcEngine,
  peer: PeerId,
  account: Account) {.async.} =
  let
    context = engine.peers.get(peer)
  if context.isNil:
    return

@ -403,7 +481,8 @@ proc paymentHandler*(
    return

  if channel =? context.paymentChannel:
    let
      sender = account.address
    discard engine.wallet.acceptPayment(channel, Asset, sender, payment)
  else:
    context.paymentChannel = engine.wallet.acceptChannel(payment).option

@ -413,6 +492,8 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
  ## list exchange
  ##

  trace "Setting up peer", peer

  if peer notin b.peers:
    trace "Setting up new peer", peer
    b.peers.add(BlockExcPeerCtx(
@ -421,9 +502,11 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
    trace "Added peer", peers = b.peers.len

  # broadcast our want list, the other peer will do the same
  if b.pendingBlocks.wantListLen > 0:
    trace "Sending our want list to a peer", peer
    let cids = toSeq(b.pendingBlocks.wantList)
    await b.network.request.sendWantList(
      peer, cids, full = true)

  if address =? b.pricing.?address:
    await b.network.request.sendAccount(peer, Account(address: address))

@ -438,8 +521,6 @@ proc dropPeer*(b: BlockExcEngine, peer: PeerId) =
  b.peers.remove(peer)

proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =

  # Send to the peer the blocks he wants to get,
  # if they are present in our local store

@ -448,38 +529,53 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =

  var
    wantsBlocks = task.peerWants.filterIt(
      it.wantType == WantType.WantBlock and not it.inFlight
    )

  proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
    for peerWant in task.peerWants.mitems:
      if peerWant.address in addresses:
        peerWant.inFlight = inFlight

  if wantsBlocks.len > 0:
    # Mark wants as in-flight.
    let wantAddresses = wantsBlocks.mapIt(it.address)
    updateInFlight(wantAddresses, true)
    wantsBlocks.sort(SortOrder.Descending)

    proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
      if e.address.leaf:
        (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
          (blkAndProof: (Block, CodexProof)) =>
            BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
        )
      else:
        (await b.localStore.getBlock(e.address)).map(
          (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
        )

    let
      blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
      blocksDelivery = blocksDeliveryFut
        .filterIt(it.completed and it.read.isOk)
        .mapIt(it.read.get)

    # All the wants that failed local lookup must be set to not-in-flight again.
    let
      successAddresses = blocksDelivery.mapIt(it.address)
      failedAddresses = wantAddresses.filterIt(it notin successAddresses)
    updateInFlight(failedAddresses, false)

    if blocksDelivery.len > 0:
      trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt($it.address)).join(",")
      await b.network.request.sendBlocksDelivery(
        task.id,
        blocksDelivery
      )

      codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)

      task.peerWants.keepItIf(it.address notin successAddresses)

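# A self-contained sketch of the in-place sequence updates used by
# updateInFlight and the final keepItIf cleanup above; `Want` is a
# hypothetical stand-in for WantListEntry.
import std/sequtils

type Want = object
  id: int
  inFlight: bool

when isMainModule:
  var wants = @[Want(id: 1), Want(id: 2), Want(id: 3)]

  # mitems yields mutable slots, so fields can be flipped without copying
  for w in wants.mitems:
    if w.id != 2:
      w.inFlight = true

  # keepItIf drops the entries that were handled, mirroring the
  # task.peerWants cleanup after a successful send
  wants.keepItIf(not it.inFlight)
  assert wants == @[Want(id: 2)]
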
proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
  ## process tasks

@ -490,21 +586,24 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
    let
      peerCtx = await b.taskQueue.pop()

    await b.taskHandler(peerCtx)

  info "Exiting blockexc task runner"

proc new*(
  T: type BlockExcEngine,
  localStore: BlockStore,
  wallet: WalletRef,
  network: BlockExcNetwork,
  discovery: DiscoveryEngine,
  peerStore: PeerCtxStore,
  pendingBlocks: PendingBlocksManager,
  concurrentTasks = DefaultConcurrentTasks,
  peersPerRequest = DefaultMaxPeersPerRequest,
  blockFetchTimeout = DefaultBlockTimeout,
): BlockExcEngine =
  ## Create new block exchange engine instance
  ##

  let
    engine = BlockExcEngine(
@ -516,7 +615,8 @@ proc new*(
      wallet: wallet,
      concurrentTasks: concurrentTasks,
      taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
      discovery: discovery,
      blockFetchTimeout: blockFetchTimeout)

  proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
    if event.kind == PeerEventKind.Joined:
@ -538,10 +638,10 @@ proc new*(
      presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
    engine.blockPresenceHandler(peer, presence)

  proc blocksDeliveryHandler(
    peer: PeerId,
    blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
    engine.blocksDeliveryHandler(peer, blocksDelivery)

  proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
    engine.accountHandler(peer, account)

@ -551,7 +651,7 @@ proc new*(

  network.handlers = BlockExcHandlers(
    onWantList: blockWantListHandler,
    onBlocksDelivery: blocksDeliveryHandler,
    onPresence: blockPresenceHandler,
    onAccount: accountHandler,
    onPayment: paymentHandler)

@ -8,21 +8,26 @@
## those terms.

import std/tables
import std/sets
import std/monotimes

import pkg/upraises

push: {.upraises: [].}

import pkg/chronos
import pkg/libp2p
import pkg/metrics

import ../protobuf/blockexc
import ../../blocktype
import ../../logutils

logScope:
  topics = "codex pendingblocks"

declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests")
declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us")

const
  DefaultBlockTimeout* = 10.minutes

@ -30,83 +35,123 @@ type
  BlockReq* = object
    handle*: Future[Block]
    inFlight*: bool
    startTime*: int64

  PendingBlocksManager* = ref object of RootObj
    blocks*: Table[BlockAddress, BlockReq] # pending Block requests

proc updatePendingBlockGauge(p: PendingBlocksManager) =
  codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)

proc getWantHandle*(
  p: PendingBlocksManager,
  address: BlockAddress,
  timeout = DefaultBlockTimeout,
  inFlight = false): Future[Block] {.async.} =
  ## Add an event for a block
  ##

  try:
    if address notin p.blocks:
      p.blocks[address] = BlockReq(
        handle: newFuture[Block]("pendingBlocks.getWantHandle"),
        inFlight: inFlight,
        startTime: getMonoTime().ticks)

    p.updatePendingBlockGauge()
    return await p.blocks[address].handle.wait(timeout)
  except CancelledError as exc:
    trace "Blocks cancelled", exc = exc.msg, address
    raise exc
  except CatchableError as exc:
    error "Pending WANT failed or expired", exc = exc.msg
    # no need to cancel, it is already cancelled by wait()
    raise exc
  finally:
    p.blocks.del(address)
    p.updatePendingBlockGauge()

proc getWantHandle*(
  p: PendingBlocksManager,
  cid: Cid,
  timeout = DefaultBlockTimeout,
  inFlight = false): Future[Block] =
  p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)

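# A minimal chronos sketch of the future-with-timeout pattern behind
# getWantHandle: a handle is registered, completed elsewhere, and awaited
# with wait(timeout), which fails the await if the block never arrives;
# the delay and value here are hypothetical.
import pkg/chronos

proc fetchDemo() {.async.} =
  let handle = newFuture[int]("demo.blockHandle")

  proc deliver() {.async.} =
    await sleepAsync(10.milliseconds) # stand-in for the network round-trip
    handle.complete(42)

  asyncSpawn deliver()
  echo "resolved: ", await handle.wait(1.seconds)

when isMainModule:
  waitFor fetchDemo()
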
proc resolve*(
  p: PendingBlocksManager,
  blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} =
  ## Resolve pending blocks
  ##

  for bd in blocksDelivery:
    p.blocks.withValue(bd.address, blockReq):
      if not blockReq.handle.finished:
        let
          startTime = blockReq.startTime
          stopTime = getMonoTime().ticks
          retrievalDurationUs = (stopTime - startTime) div 1000

        blockReq.handle.complete(bd.blk)

        codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)

        if retrievalDurationUs > 500000:
          warn "High block retrieval time", retrievalDurationUs, address = bd.address
      else:
        trace "Block handle already finished", address = bd.address

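# A self-contained sketch of the monotonic timing in resolve above:
# getMonoTime().ticks is in nanoseconds, so `div 1000` yields microseconds.
import std/monotimes
import std/os

when isMainModule:
  let startTime = getMonoTime().ticks
  sleep(5) # stand-in for the time between request and delivery
  let stopTime = getMonoTime().ticks
  echo "retrieval took ", (stopTime - startTime) div 1000, " us"
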
proc setInFlight*(
  p: PendingBlocksManager,
  address: BlockAddress,
  inFlight = true) =
  ## Set inflight status for a block
  ##

  p.blocks.withValue(address, pending):
    pending[].inFlight = inFlight

proc isInFlight*(
  p: PendingBlocksManager,
  address: BlockAddress): bool =
  ## Check if a block is in flight
  ##

  p.blocks.withValue(address, pending):
    result = pending[].inFlight

proc contains*(p: PendingBlocksManager, cid: Cid): bool =
  BlockAddress.init(cid) in p.blocks

proc contains*(p: PendingBlocksManager, address: BlockAddress): bool =
  address in p.blocks

iterator wantList*(p: PendingBlocksManager): BlockAddress =
  for a in p.blocks.keys:
    yield a

iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
  for a in p.blocks.keys:
    if not a.leaf:
      yield a.cid

iterator wantListCids*(p: PendingBlocksManager): Cid =
  var yieldedCids = initHashSet[Cid]()
  for a in p.blocks.keys:
    let cid = a.cidOrTreeCid
    if cid notin yieldedCids:
      yieldedCids.incl(cid)
      yield cid

iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
  for v in p.blocks.values:
    yield v.handle

proc wantListLen*(p: PendingBlocksManager): int =
  p.blocks.len

func len*(p: PendingBlocksManager): int =
  p.blocks.len

func new*(T: type PendingBlocksManager): PendingBlocksManager =
  PendingBlocksManager()

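# A sketch of the std/tables withValue pattern used throughout this module:
# the body runs only when the key is present and `value[]` is a mutable
# view, so there is no separate contains-then-fetch step.
import std/tables

when isMainModule:
  var counts = {"a": 1}.toTable
  counts.withValue("a", value):
    value[] += 1 # in-place update, like pending[].inFlight = inFlight
  counts.withValue("missing", value):
    value[] += 1 # body is skipped; no KeyError is raised
  assert counts["a"] == 2
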
@ -10,7 +10,6 @@
import std/tables
import std/sequtils

import pkg/chronos

import pkg/libp2p

@ -19,6 +18,7 @@ import pkg/questionable
import pkg/questionable/results

import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../protobuf/payments

@ -34,47 +34,61 @@ const
  MaxInflight* = 100

type
  WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
  BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
  BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
  AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
  PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}

  BlockExcHandlers* = object
    onWantList*: WantListHandler
    onBlocksDelivery*: BlocksDeliveryHandler
    onPresence*: BlockPresenceHandler
    onAccount*: AccountHandler
    onPayment*: PaymentHandler

  WantListSender* = proc(
    id: PeerId,
    addresses: seq[BlockAddress],
    priority: int32 = 0,
    cancel: bool = false,
    wantType: WantType = WantType.WantHave,
    full: bool = false,
    sendDontHave: bool = false): Future[void] {.gcsafe.}
  WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
  BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
  PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
  AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
  PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}

  BlockExcRequest* = object
    sendWantList*: WantListSender
    sendWantCancellations*: WantCancellationSender
    sendBlocksDelivery*: BlocksDeliverySender
    sendPresence*: PresenceSender
    sendAccount*: AccountSender
    sendPayment*: PaymentSender

  BlockExcNetwork* = ref object of LPProtocol
    peers*: Table[PeerId, NetworkPeer]
    switch*: Switch
    handlers*: BlockExcHandlers
    request*: BlockExcRequest
    getConn: ConnProvider
    inflightSema: AsyncSemaphore

proc peerId*(b: BlockExcNetwork): PeerId =
  ## Return peer id
  ##

  return b.switch.peerInfo.peerId

proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
  ## Check if peer is self
  ##

  return b.peerId == peer

proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
  ## Send message to peer
  ##

@ -82,8 +96,11 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
  b.peers.withValue(id, peer):
    try:
      await b.inflightSema.acquire()
      await peer[].send(msg)
    except CancelledError as error:
      raise error
    except CatchableError as err:
      error "Error sending message", peer = id, msg = err.msg
    finally:
      b.inflightSema.release()
  do:
@ -97,31 +114,12 @@ proc handleWantList(
  ##

  if not b.handlers.onWantList.isNil:
    await b.handlers.onWantList(peer.id, list)

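# A minimal sketch of the in-flight gating in send above, assuming
# nim-libp2p's utils/semaphore module provides the AsyncSemaphore used
# here: at most `maxInflight` writes run at once, and release happens in
# `finally` so a failed send cannot leak a permit.
import pkg/chronos
import pkg/libp2p/utils/semaphore

proc sendOne(sema: AsyncSemaphore, i: int) {.async.} =
  await sema.acquire()
  try:
    await sleepAsync(5.milliseconds) # stand-in for the wire write
    echo "sent ", i
  finally:
    sema.release()

when isMainModule:
  let sema = newAsyncSemaphore(2) # mirrors MaxInflight
  waitFor allFutures(@[sendOne(sema, 1), sendOne(sema, 2), sendOne(sema, 3)])
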
proc sendWantList*(
  b: BlockExcNetwork,
  id: PeerId,
  addresses: seq[BlockAddress],
  priority: int32 = 0,
  cancel: bool = false,
  wantType: WantType = WantType.WantHave,
@ -130,57 +128,45 @@ proc sendWantList*(
  ## Send a want message to peer
  ##

  let msg = WantList(
    entries: addresses.mapIt(
      WantListEntry(
        address: it,
        priority: priority,
        cancel: cancel,
        wantType: wantType,
        sendDontHave: sendDontHave)),
    full: full)

  b.send(id, Message(wantlist: msg))

proc sendWantCancellations*(
  b: BlockExcNetwork,
  id: PeerId,
  addresses: seq[BlockAddress]): Future[void] {.async.} =
  ## Informs a remote peer that we're no longer interested in a set of blocks
  ##
  await b.sendWantList(id = id, addresses = addresses, cancel = true)

proc handleBlocksDelivery(
  b: BlockExcNetwork,
  peer: NetworkPeer,
  blocksDelivery: seq[BlockDelivery]) {.async.} =
  ## Handle incoming blocks
  ##

  if not b.handlers.onBlocksDelivery.isNil:
    await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)

proc sendBlocksDelivery*(
  b: BlockExcNetwork,
  id: PeerId,
  blocksDelivery: seq[BlockDelivery]): Future[void] =
  ## Send blocks to remote
  ##

  b.send(id, pb.Message(payload: blocksDelivery))

proc handleBlockPresence(
  b: BlockExcNetwork,
@ -190,12 +176,11 @@ proc handleBlockPresence(
  ##

  if not b.handlers.onPresence.isNil:
    await b.handlers.onPresence(peer.id, presence)

proc sendBlockPresence*(
  b: BlockExcNetwork,
  id: PeerId,
  presence: seq[BlockPresence]): Future[void] =
  ## Send presence to remote
  ##

@ -240,43 +225,46 @@ proc handlePayment(
  if not network.handlers.onPayment.isNil:
    await network.handlers.onPayment(peer.id, payment)

proc rpcHandler(
  b: BlockExcNetwork,
  peer: NetworkPeer,
  msg: Message) {.raises: [].} =
  ## handle rpc messages
  ##
  if msg.wantList.entries.len > 0:
    asyncSpawn b.handleWantList(peer, msg.wantList)

  if msg.payload.len > 0:
    asyncSpawn b.handleBlocksDelivery(peer, msg.payload)

  if msg.blockPresences.len > 0:
    asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)

  if account =? Account.init(msg.account):
    asyncSpawn b.handleAccount(peer, account)

  if payment =? SignedState.init(msg.payment):
    asyncSpawn b.handlePayment(peer, payment)

proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
  ## Creates or retrieves a BlockExcNetwork Peer
  ##

  if peer in b.peers:
    return b.peers.getOrDefault(peer, nil)

  var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
    try:
      return await b.switch.dial(peer, Codec)
    except CancelledError as error:
      raise error
    except CatchableError as exc:
      trace "Unable to connect to blockexc peer", exc = exc.msg

  if not isNil(b.getConn):
    getConn = b.getConn

  let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} =
    b.rpcHandler(p, msg)

  # create new pubsub peer
@ -287,7 +275,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =

  return blockExcPeer

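# A small chronos sketch of the fire-and-forget dispatch in rpcHandler:
# asyncSpawn detaches each handler future so one slow handler cannot stall
# the read loop; handler failures surface through chronos' global handling
# rather than at the call site. The handler names are hypothetical.
import pkg/chronos

proc handle(kind: string) {.async.} =
  await sleepAsync(1.milliseconds)
  echo "handled ", kind

when isMainModule:
  asyncSpawn handle("wantList")
  asyncSpawn handle("payload")
  waitFor sleepAsync(10.milliseconds) # give the spawned handlers time to run
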
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
  ## Perform initial setup, such as want
  ## list exchange
  ##

@ -295,9 +283,16 @@ proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
  discard b.getOrCreatePeer(peer)

proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
  ## Dial a peer
  ##

  if b.isSelf(peer.peerId):
    trace "Skipping dialing self", peer = peer.peerId
    return

  await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))

proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
  ## Cleanup disconnected peer
  ##

@ -307,7 +302,7 @@ method init*(b: BlockExcNetwork) =
  ## Perform protocol initialization
  ##

  proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
    if event.kind == PeerEventKind.Joined:
      b.setupPeer(peerId)
    else:
@ -328,7 +323,7 @@ proc new*(
  T: type BlockExcNetwork,
  switch: Switch,
  connProvider: ConnProvider = nil,
  maxInflight = MaxInflight): BlockExcNetwork =
  ## Create a new BlockExcNetwork instance
  ##

@ -339,8 +334,8 @@ proc new*(
    inflightSema: newAsyncSemaphore(maxInflight))

  proc sendWantList(
    id: PeerId,
    cids: seq[BlockAddress],
    priority: int32 = 0,
    cancel: bool = false,
    wantType: WantType = WantType.WantHave,
@ -350,21 +345,25 @@ proc new*(
      id, cids, priority, cancel,
      wantType, full, sendDontHave)

  proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} =
    self.sendWantCancellations(id, addresses)

  proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
    self.sendBlocksDelivery(id, blocksDelivery)

  proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
    self.sendBlockPresence(id, presence)

  proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
    self.sendAccount(id, account)

  proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
    self.sendPayment(id, payment)

  self.request = BlockExcRequest(
    sendWantList: sendWantList,
    sendWantCancellations: sendWantCancellations,
    sendBlocksDelivery: sendBlocksDelivery,
    sendPresence: sendPresence,
    sendAccount: sendAccount,
    sendPayment: sendPayment)

@ -11,18 +11,16 @@ import pkg/upraises
push: {.upraises: [].}

import pkg/chronos
import pkg/libp2p

import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils

logScope:
  topics = "codex blockexcnetworkpeer"

type
  ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}

@ -45,12 +43,13 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
  try:
    while not conn.atEof or not conn.closed:
      let
        data = await conn.readLp(MaxMessageSize.int)
        msg = Message.protobufDecode(data).mapFailure().tryGet()
      await b.handler(b, msg)
  except CancelledError:
    trace "Read loop cancelled"
  except CatchableError as err:
    warn "Exception in blockexc read loop", msg = err.msg
  finally:
    await conn.close()

@ -66,18 +65,17 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} =
  let conn = await b.connect()

  if isNil(conn):
    warn "Unable to get send connection for peer, message not sent", peer = b.id
    return

  await conn.writeLp(protobufEncode(msg))

proc broadcast*(b: NetworkPeer, msg: Message) =
  proc sendAwaiter() {.async.} =
    try:
      await b.send(msg)
    except CatchableError as exc:
      warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg

  asyncSpawn sendAwaiter()

@ -85,7 +83,7 @@ func new*(
  T: type NetworkPeer,
  peer: PeerId,
  connProvider: ConnProvider,
  rpcHandler: RPCHandler): NetworkPeer =

  doAssert(not isNil(connProvider),
    "should supply connection provider")

@ -9,8 +9,8 @@

import std/sequtils
import std/tables
import std/sets

import pkg/libp2p
import pkg/chronos
import pkg/nitro
@ -20,42 +20,47 @@ import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence

import ../../blocktype
import ../../logutils

export payments, nitro

type
  BlockExcPeerCtx* = ref object of RootObj
    id*: PeerId
    blocks*: Table[BlockAddress, Presence] # remote peer have list including price
    peerWants*: seq[WantListEntry]         # remote peers want lists
    exchanged*: int                        # times peer has exchanged with us
    lastExchange*: Moment                  # last time peer has exchanged with us
    account*: ?Account                     # ethereum account of this peer
    paymentChannel*: ?ChannelId            # payment channel id

proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
  toSeq(self.blocks.keys)

proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
  self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet

proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
  self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet

proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  address in self.blocks

func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
  self.blocks[presence.address] = presence

func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
  for a in addresses:
    self.blocks.del(a)

func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
  self.cleanPresence(@[address])

func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
  var price = 0.u256
  for a in addresses:
    self.blocks.withValue(a, precense):
      price += precense[].price

  price

@ -16,10 +16,12 @@ import pkg/upraises
push: {.upraises: [].}

import pkg/chronos
import pkg/libp2p

import ../protobuf/blockexc
import ../../blocktype
import ../../logutils

import ./peercontext
export peercontext
@ -29,56 +31,59 @@ logScope:

type
  PeerCtxStore* = ref object of RootObj
    peers*: OrderedTable[PeerId, BlockExcPeerCtx]

iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
  for p in self.peers.values:
    yield p

proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
  ## Convenience method to check for peer presence
  ##

  a.anyIt( it.id == b )

func contains*(self: PeerCtxStore, peerId: PeerId): bool =
  peerId in self.peers

func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
  self.peers[peer.id] = peer

func remove*(self: PeerCtxStore, peerId: PeerId) =
  self.peers.del(peerId)

func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
  self.peers.getOrDefault(peerId, nil)

func len*(self: PeerCtxStore): int =
  self.peers.len

func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )

func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) )

func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )

func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )

func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  # assume that the price for all leaves in a tree is the same
  let rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid)
  var peers = self.peersHave(rootAddress)

  func cmp(a, b: BlockExcPeerCtx): int =
    var
      priceA = 0.u256
      priceB = 0.u256

    a.blocks.withValue(rootAddress, precense):
      priceA = precense[].price

    b.blocks.withValue(rootAddress, precense):
      priceB = precense[].price

    if priceA == priceB:
@ -93,5 +98,5 @@ func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  return peers

proc new*(T: type PeerCtxStore): PeerCtxStore =
  ## create new instance of a peer context store
  PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]())

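# A self-contained sketch of the comparator sort behind selectCheapest:
# std/algorithm's sort takes a cmp returning negative/zero/positive, here
# ordering hypothetical peer prices ascending.
import std/algorithm

when isMainModule:
  var prices = @[3, 1, 2]

  func byPrice(a, b: int): int =
    if a == b: 0
    elif a < b: -1
    else: 1

  prices.sort(byPrice, SortOrder.Ascending)
  assert prices == @[1, 2, 3]
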
@ -9,47 +9,45 @@

import std/hashes
import std/sequtils
import pkg/stew/endians2

import message

import ../../blocktype

export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate

proc hash*(a: BlockAddress): Hash =
  if a.leaf:
    let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
    hash(data)
  else:
    hash(a.cid.data.buffer)

proc hash*(e: WantListEntry): Hash =
  hash(e.address)

proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
  ## Convenience method to check for presence of an address
  ##

  a.anyIt(it.address == b)

proc `==`*(a: WantListEntry, b: BlockAddress): bool =
  return a.address == b

proc `<`*(a, b: WantListEntry): bool =
  a.priority < b.priority

proc `==`*(a: BlockPresence, b: BlockAddress): bool =
  return a.address == b

proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
  ## Convenience method to check for presence of an address
  ##

  a.anyIt(it.address == b)

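# A sketch of the leaf-address hashing above: the tree CID bytes and the
# big-endian index are folded into one buffer before hashing, so equal
# (treeCid, index) pairs always land in the same table slot; the byte
# values here are hypothetical stand-ins for treeCid.data.buffer.
import std/hashes
import pkg/stew/endians2

when isMainModule:
  let treeBytes = @[0x01'u8, 0x02, 0x03]
  let index = 7'u64
  let data = treeBytes & @(index.toBytesBE)
  echo hash(data) # stable for a given (bytes, index) pair
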
@@ -2,36 +2,50 @@
 # and Protobuf encoder/decoder for these messages.
 #
 # Eventually all this code should be auto-generated from message.proto.
+import std/sugar

 import pkg/libp2p/protobuf/minprotobuf
+import pkg/libp2p/cid
+
+import pkg/questionable
+
+import ../../units
+
+import ../../merkletree
+import ../../blocktype
+
+const
+  MaxBlockSize* = 100.MiBs.uint
+  MaxMessageSize* = 100.MiBs.uint

 type
   WantType* = enum
     WantBlock = 0,
     WantHave = 1

-  Entry* = object
-    `block`*: seq[byte]          # The block cid
+  WantListEntry* = object
+    address*: BlockAddress
     priority*: int32             # The priority (normalized). default to 1
     cancel*: bool                # Whether this revokes an entry
     wantType*: WantType          # Note: defaults to enum 0, ie Block
     sendDontHave*: bool          # Note: defaults to false
+    inFlight*: bool              # Whether block sending is in progress. Not serialized.

-  Wantlist* = object
-    entries*: seq[Entry]         # A list of wantlist entries
-    full*: bool                  # Whether this is the full wantlist. default to false
+  WantList* = object
+    entries*: seq[WantListEntry] # A list of wantList entries
+    full*: bool                  # Whether this is the full wantList. default to false

-  Block* = object
-    prefix*: seq[byte]           # CID prefix (cid version, multicodec and multihash prefix (type + length)
-    data*: seq[byte]
+  BlockDelivery* = object
+    blk*: Block
+    address*: BlockAddress
+    proof*: ?CodexProof          # Present only if `address.leaf` is true

   BlockPresenceType* = enum
     Have = 0,
     DontHave = 1

   BlockPresence* = object
-    cid*: seq[byte]              # The block cid
+    address*: BlockAddress
     `type`*: BlockPresenceType
     price*: seq[byte]            # Amount of assets to pay for the block (UInt256)

@@ -42,8 +56,8 @@ type
     update*: seq[byte]           # Signed Nitro state, serialized as JSON

   Message* = object
-    wantlist*: Wantlist
-    payload*: seq[Block]
+    wantList*: WantList
+    payload*: seq[BlockDelivery]
     blockPresences*: seq[BlockPresence]
     pendingBytes*: uint
     account*: AccountMessage

@@ -53,9 +67,20 @@ type
 # Encoding Message into seq[byte] in Protobuf format
 #

-proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
+proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
   var ipb = initProtoBuffer()
-  ipb.write(1, value.`block`)
+  ipb.write(1, value.leaf.uint)
+  if value.leaf:
+    ipb.write(2, value.treeCid.data.buffer)
+    ipb.write(3, value.index.uint64)
+  else:
+    ipb.write(4, value.cid.data.buffer)
+  ipb.finish()
+  pb.write(field, ipb)
+
+proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
+  var ipb = initProtoBuffer()
+  ipb.write(1, value.address)
   ipb.write(2, value.priority.uint64)
   ipb.write(3, value.cancel.uint)
   ipb.write(4, value.wantType.uint)

@@ -63,7 +88,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
   ipb.finish()
   pb.write(field, ipb)

-proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
+proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
   var ipb = initProtoBuffer()
   for v in value.entries:
     ipb.write(1, v)

@@ -71,16 +96,20 @@ proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
   ipb.finish()
   pb.write(field, ipb)

-proc write*(pb: var ProtoBuffer, field: int, value: Block) =
-  var ipb = initProtoBuffer()
-  ipb.write(1, value.prefix)
-  ipb.write(2, value.data)
+proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
+  var ipb = initProtoBuffer(maxSize = MaxBlockSize)
+  ipb.write(1, value.blk.cid.data.buffer)
+  ipb.write(2, value.blk.data)
+  ipb.write(3, value.address)
+  if value.address.leaf:
+    if proof =? value.proof:
+      ipb.write(4, proof.encode())
   ipb.finish()
   pb.write(field, ipb)

 proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
   var ipb = initProtoBuffer()
-  ipb.write(1, value.cid)
+  ipb.write(1, value.address)
   ipb.write(2, value.`type`.uint)
   ipb.write(3, value.price)
   ipb.finish()

@@ -98,9 +127,9 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
   ipb.finish()
   pb.write(field, ipb)

-proc ProtobufEncode*(value: Message): seq[byte] =
-  var ipb = initProtoBuffer()
-  ipb.write(1, value.wantlist)
+proc protobufEncode*(value: Message): seq[byte] =
+  var ipb = initProtoBuffer(maxSize = MaxMessageSize)
+  ipb.write(1, value.wantList)
   for v in value.payload:
     ipb.write(3, v)
   for v in value.blockPresences:

@@ -115,12 +144,40 @@ proc ProtobufEncode*(value: Message): seq[byte] =
 #
 # Decoding Message from seq[byte] in Protobuf format
 #

-proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
+proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
   var
-    value = Entry()
+    value: BlockAddress
+    leaf: bool
     field: uint64
-  discard ? pb.getField(1, value.`block`)
+    cidBuf = newSeq[byte]()
+
+  if ? pb.getField(1, field):
+    leaf = bool(field)
+
+  if leaf:
+    var
+      treeCid: Cid
+      index: Natural
+    if ? pb.getField(2, cidBuf):
+      treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
+    if ? pb.getField(3, field):
+      index = field
+    value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
+  else:
+    var cid: Cid
+    if ? pb.getField(4, cidBuf):
+      cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
+    value = BlockAddress(leaf: false, cid: cid)
+
+  ok(value)
+
+proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] =
+  var
+    value = WantListEntry()
+    field: uint64
+    ipb: ProtoBuffer
+  if ? pb.getField(1, ipb):
+    value.address = ? BlockAddress.decode(ipb)
   if ? pb.getField(2, field):
     value.priority = int32(field)
   if ? pb.getField(3, field):

@@ -131,30 +188,52 @@ proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
     value.sendDontHave = bool(field)
   ok(value)

-proc decode*(_: type Wantlist, pb: ProtoBuffer): ProtoResult[Wantlist] =
+proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
   var
-    value = Wantlist()
+    value = WantList()
     field: uint64
     sublist: seq[seq[byte]]
   if ? pb.getRepeatedField(1, sublist):
     for item in sublist:
-      value.entries.add(? Entry.decode(initProtoBuffer(item)))
+      value.entries.add(? WantListEntry.decode(initProtoBuffer(item)))
   if ? pb.getField(2, field):
     value.full = bool(field)
   ok(value)

-proc decode*(_: type Block, pb: ProtoBuffer): ProtoResult[Block] =
+proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
   var
-    value = Block()
-  discard ? pb.getField(1, value.prefix)
-  discard ? pb.getField(2, value.data)
+    value = BlockDelivery()
+    dataBuf = newSeq[byte]()
+    cidBuf = newSeq[byte]()
+    cid: Cid
+    ipb: ProtoBuffer
+
+  if ? pb.getField(1, cidBuf):
+    cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
+  if ? pb.getField(2, dataBuf):
+    value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
+  if ? pb.getField(3, ipb):
+    value.address = ? BlockAddress.decode(ipb)
+
+  if value.address.leaf:
+    var proofBuf = newSeq[byte]()
+    if ? pb.getField(4, proofBuf):
+      let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
+      value.proof = proof.some
+    else:
+      value.proof = CodexProof.none
+  else:
+    value.proof = CodexProof.none
+
   ok(value)

 proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
   var
     value = BlockPresence()
     field: uint64
-  discard ? pb.getField(1, value.cid)
+    ipb: ProtoBuffer
+  if ? pb.getField(1, ipb):
+    value.address = ? BlockAddress.decode(ipb)
   if ? pb.getField(2, field):
     value.`type` = BlockPresenceType(field)
   discard ? pb.getField(3, value.price)

@@ -172,17 +251,17 @@ proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChan
   discard ? pb.getField(1, value.update)
   ok(value)

-proc ProtobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
+proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
   var
     value = Message()
-    pb = initProtoBuffer(msg)
+    pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
     ipb: ProtoBuffer
     sublist: seq[seq[byte]]
   if ? pb.getField(1, ipb):
-    value.wantlist = ? Wantlist.decode(ipb)
+    value.wantList = ? WantList.decode(ipb)
   if ? pb.getRepeatedField(3, sublist):
     for item in sublist:
-      value.payload.add(? Block.decode(initProtoBuffer(item)))
+      value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
   if ? pb.getRepeatedField(4, sublist):
     for item in sublist:
       value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item)))
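Illustrative usage, not part of the changeset: a sketch of a wire round-trip through the renamed encoder/decoder, assuming `tryGet` from stew/results and an arbitrary CID value.

  let
    cid = Cid.init("zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA").tryGet()
    msg = Message(
      wantList: WantList(
        entries: @[WantListEntry(
          address: BlockAddress(leaf: false, cid: cid),
          priority: 1,
          wantType: WantType.WantHave)]))
    bytes = protobufEncode(msg)                       # bounded by MaxMessageSize
    decoded = Message.protobufDecode(bytes).tryGet()  # unwrap the ProtoResult

  assert decoded.wantList.entries[0].address == msg.wantList.entries[0].address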
@@ -5,6 +5,8 @@ import pkg/questionable/results
 import pkg/upraises
 import ./blockexc

+import ../../blocktype
+
 export questionable
 export stint
 export BlockPresenceType

@@ -14,7 +16,7 @@ upraises.push: {.upraises: [].}
 type
   PresenceMessage* = blockexc.BlockPresence
   Presence* = object
-    cid*: Cid
+    address*: BlockAddress
     have*: bool
     price*: UInt256

@@ -24,19 +26,18 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
   UInt256.fromBytesBE(bytes).some

 func init*(_: type Presence, message: PresenceMessage): ?Presence =
-  without cid =? Cid.init(message.cid) and
-          price =? UInt256.parse(message.price):
+  without price =? UInt256.parse(message.price):
     return none Presence

   some Presence(
-    cid: cid,
+    address: message.address,
     have: message.`type` == BlockPresenceType.Have,
     price: price
   )

 func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
   PresenceMessage(
-    cid: presence.cid.data.buffer,
+    address: presence.address,
     `type`: if presence.have:
               BlockPresenceType.Have
             else:
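Illustrative usage, not part of the changeset: Presence now carries the full BlockAddress, so a round-trip through the wire type preserves leaf addresses. A rough sketch, with `address` as any BlockAddress value:

  let
    presence = Presence(address: address, have: true, price: 42.u256)
    wire = PresenceMessage.init(presence)

  # decoding now only fails if the price bytes don't parse as UInt256
  assert Presence.init(wire).isSome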
@@ -8,120 +8,78 @@
 ## those terms.

 import std/tables
+import std/sugar

 export tables

 import pkg/upraises

 push: {.upraises: [].}

-import pkg/libp2p
+import pkg/libp2p/[cid, multicodec, multihash]
 import pkg/stew/byteutils
 import pkg/questionable
 import pkg/questionable/results
-import pkg/chronicles

-import ./formats
+import ./units
+import ./utils
 import ./errors
+import ./logutils
+import ./utils/json
+import ./codextypes

-export errors, formats
+export errors, logutils, units, codextypes

-const
-  # Size of blocks for storage / network exchange,
-  # should be divisible by 31 for PoR and by 64 for Leopard ECC
-  BlockSize* = 31 * 64 * 33
-
 type
   Block* = ref object of RootObj
     cid*: Cid
     data*: seq[byte]

-  BlockNotFoundError* = object of CodexError
+  BlockAddress* = object
+    case leaf*: bool
+    of true:
+      treeCid* {.serialize.}: Cid
+      index* {.serialize.}: Natural
+    else:
+      cid* {.serialize.}: Cid

-template EmptyCid*: untyped =
-  var
-    emptyCid {.global, threadvar.}:
-      array[CIDv0..CIDv1, Table[MultiCodec, Cid]]
-
-  once:
-    emptyCid = [
-      CIDv0: {
-        multiCodec("sha2-256"): Cid
-        .init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
-        .get()
-      }.toTable,
-      CIDv1: {
-        multiCodec("sha2-256"): Cid
-        .init("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")
-        .get()
-      }.toTable,
-    ]
-
-  emptyCid
+logutils.formatIt(LogFormat.textLines, BlockAddress):
+  if it.leaf:
+    "treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
+  else:
+    "cid: " & shortLog($it.cid)

-template EmptyDigests*: untyped =
-  var
-    emptyDigests {.global, threadvar.}:
-      array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]]
-
-  once:
-    emptyDigests = [
-      CIDv0: {
-        multiCodec("sha2-256"): EmptyCid[CIDv0]
-        .catch
-        .get()[multiCodec("sha2-256")]
-        .catch
-        .get()
-        .mhash
-        .get()
-      }.toTable,
-      CIDv1: {
-        multiCodec("sha2-256"): EmptyCid[CIDv1]
-        .catch
-        .get()[multiCodec("sha2-256")]
-        .catch
-        .get()
-        .mhash
-        .get()
-      }.toTable,
-    ]
-
-  emptyDigests
+logutils.formatIt(LogFormat.json, BlockAddress): %it

+proc `==`*(a, b: BlockAddress): bool =
+  a.leaf == b.leaf and
+    (
+      if a.leaf:
+        a.treeCid == b.treeCid and a.index == b.index
+      else:
+        a.cid == b.cid
+    )
+
+proc `$`*(a: BlockAddress): string =
+  if a.leaf:
+    "treeCid: " & $a.treeCid & ", index: " & $a.index
+  else:
+    "cid: " & $a.cid
+
+proc cidOrTreeCid*(a: BlockAddress): Cid =
+  if a.leaf:
+    a.treeCid
+  else:
+    a.cid

-template EmptyBlock*: untyped =
-  var
-    emptyBlock {.global, threadvar.}:
-      array[CIDv0..CIDv1, Table[MultiCodec, Block]]
-
-  once:
-    emptyBlock = [
-      CIDv0: {
-        multiCodec("sha2-256"): Block(
-          cid: EmptyCid[CIDv0][multiCodec("sha2-256")])
-      }.toTable,
-      CIDv1: {
-        multiCodec("sha2-256"): Block(
-          cid: EmptyCid[CIDv1][multiCodec("sha2-256")])
-      }.toTable,
-    ]
-
-  emptyBlock
+proc address*(b: Block): BlockAddress =
+  BlockAddress(leaf: false, cid: b.cid)

-proc isEmpty*(cid: Cid): bool =
-  cid == EmptyCid[cid.cidver]
-  .catch
-  .get()[cid.mhash.get().mcodec]
-  .catch
-  .get()
+proc init*(_: type BlockAddress, cid: Cid): BlockAddress =
+  BlockAddress(leaf: false, cid: cid)

-proc isEmpty*(blk: Block): bool =
-  blk.cid.isEmpty
+proc init*(_: type BlockAddress, treeCid: Cid, index: Natural): BlockAddress =
+  BlockAddress(leaf: true, treeCid: treeCid, index: index)

-proc emptyBlock*(cid: Cid): Block =
-  EmptyBlock[cid.cidver]
-  .catch
-  .get()[cid.mhash.get().mcodec]
-  .catch
-  .get()
-
 proc `$`*(b: Block): string =
   result &= "cid: " & $b.cid

@@ -131,8 +89,10 @@ func new*(
   T: type Block,
   data: openArray[byte] = [],
   version = CIDv1,
-  mcodec = multiCodec("sha2-256"),
-  codec = multiCodec("raw")): ?!T =
+  mcodec = Sha256HashCodec,
+  codec = BlockCodec): ?!Block =
+  ## creates a new block for both storage and network IO
+  ##

   let
     hash = ? MultiHash.digest($mcodec, data).mapFailure

@@ -144,21 +104,39 @@ func new*(
     cid: cid,
     data: @data).success

-func new*(
+proc new*(
   T: type Block,
   cid: Cid,
   data: openArray[byte],
-  verify: bool = true): ?!T =
+  verify: bool = true
+): ?!Block =
+  ## creates a new block for both storage and network IO
+  ##

-  let
-    mhash = ? cid.mhash.mapFailure
-    b = ? Block.new(
-      data = @data,
-      version = cid.cidver,
-      codec = cid.mcodec,
-      mcodec = mhash.mcodec)
+  if verify:
+    let
+      mhash = ? cid.mhash.mapFailure
+      computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure
+      computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
+    if computedCid != cid:
+      return "Cid doesn't match the data".failure

-  if verify and cid != b.cid:
-    return "Cid and content don't match!".failure
+  return Block(
+    cid: cid,
+    data: @data
+  ).success

-  success b
+proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
+  emptyCid(version, hcodec, BlockCodec)
+    .flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
+
+proc emptyBlock*(cid: Cid): ?!Block =
+  cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
+    emptyBlock(cid.cidver, mhash.mcodec))
+
+proc isEmpty*(cid: Cid): bool =
+  success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
+    emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
+
+proc isEmpty*(blk: Block): bool =
+  blk.cid.isEmpty
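Illustrative usage, not part of the changeset: a rough sketch of the new BlockAddress constructors and cid verification, assuming `tryGet`/`isErr` from the results helpers already imported above.

  let
    blk = Block.new(data = @[1'u8, 2, 3]).tryGet()  # cid derived from the data
    direct = BlockAddress.init(blk.cid)             # non-leaf address
    leaf = BlockAddress.init(blk.cid, 0)            # leaf address; tree cid here is illustrative

  assert not direct.leaf and leaf.leaf
  assert direct.cidOrTreeCid == blk.cid

  # with verify = true the cid is recomputed from the data and must match
  assert Block.new(blk.cid, @[9'u8], verify = true).isErr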
@@ -13,18 +13,18 @@ import pkg/upraises

 push: {.upraises: [].}

-import pkg/chronicles
 import pkg/questionable
 import pkg/questionable/results
 import pkg/chronos
 import pkg/libp2p except shuffle

 import ./blocktype
+import ./logutils

 export blocktype

 const
-  DefaultChunkSize* = BlockSize
+  DefaultChunkSize* = DefaultBlockSize

 type
   # default reader type

@@ -35,7 +35,7 @@ type
   Chunker* = ref object
     reader*: Reader        # Procedure called to actually read the data
     offset*: int           # Bytes read so far (position in the stream)
-    chunkSize*: Natural    # Size of each chunk
+    chunkSize*: NBytes     # Size of each chunk
     pad*: bool             # Pad last chunk to chunkSize?

   FileChunker* = Chunker

@@ -46,7 +46,7 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
   ## the instantiated chunker
   ##

-  var buff = newSeq[byte](c.chunkSize)
+  var buff = newSeq[byte](c.chunkSize.int)
   let read = await c.reader(cast[ChunkBuffer](addr buff[0]), buff.len)

   if read <= 0:

@@ -59,22 +59,26 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =

   return move buff

-func new*(
+proc new*(
   T: type Chunker,
   reader: Reader,
   chunkSize = DefaultChunkSize,
-  pad = true): T =
-  T(reader: reader,
+  pad = true
+): Chunker =
+  ## create a new Chunker instance
+  ##
+  Chunker(
+    reader: reader,
     offset: 0,
     chunkSize: chunkSize,
     pad: pad)

 proc new*(
   T: type LPStreamChunker,
   stream: LPStream,
   chunkSize = DefaultChunkSize,
-  pad = true): T =
+  pad = true
+): LPStreamChunker =
   ## create the default File chunker
   ##

@@ -86,22 +90,25 @@ proc new*(
       res += await stream.readOnce(addr data[res], len - res)
     except LPStreamEOFError as exc:
       trace "LPStreamChunker stream Eof", exc = exc.msg
+    except CancelledError as error:
+      raise error
     except CatchableError as exc:
       trace "CatchableError exception", exc = exc.msg
       raise newException(Defect, exc.msg)

     return res

-  T.new(
+  LPStreamChunker.new(
     reader = reader,
     chunkSize = chunkSize,
     pad = pad)

 proc new*(
   T: type FileChunker,
   file: File,
   chunkSize = DefaultChunkSize,
-  pad = true): T =
+  pad = true
+): FileChunker =
   ## create the default File chunker
   ##

@@ -117,13 +124,15 @@ proc new*(
       total += res
     except IOError as exc:
       trace "Exception reading file", exc = exc.msg
+    except CancelledError as error:
+      raise error
     except CatchableError as exc:
       trace "CatchableError exception", exc = exc.msg
       raise newException(Defect, exc.msg)

     return total

-  T.new(
+  FileChunker.new(
     reader = reader,
     chunkSize = chunkSize,
     pad = pad)
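Illustrative usage, not part of the changeset: reading a file in DefaultChunkSize pieces with the FileChunker; "data.bin" is a hypothetical input, and an empty chunk signals end of input (see getBytes above).

  import pkg/chronos

  proc dumpChunks() {.async.} =
    let file = open("data.bin")
    defer: file.close()
    let chunker = FileChunker.new(file = file, pad = false)
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len == 0:
        break
      echo "chunk of ", chunk.len, " bytes"

  waitFor dumpChunks()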
@@ -1,16 +1,24 @@
 import pkg/chronos
+import pkg/stew/endians2
+import pkg/upraises
+import pkg/stint

 type
   Clock* = ref object of RootObj
   SecondsSince1970* = int64
   Timeout* = object of CatchableError

-method now*(clock: Clock): SecondsSince1970 {.base.} =
+method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} =
   raiseAssert "not implemented"

-proc waitUntil*(clock: Clock, time: SecondsSince1970) {.async.} =
-  while clock.now() < time:
-    await sleepAsync(1.seconds)
+method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
+  raiseAssert "not implemented"
+
+method start*(clock: Clock) {.base, async.} =
+  discard
+
+method stop*(clock: Clock) {.base, async.} =
+  discard

 proc withTimeout*(future: Future[void],
                   clock: Clock,

@@ -23,3 +31,14 @@ proc withTimeout*(future: Future[void],
   if not future.completed:
     await future.cancelAndWait()
     raise newException(Timeout, "Timed out")
+
+proc toBytes*(i: SecondsSince1970): seq[byte] =
+  let asUint = cast[uint64](i)
+  @(asUint.toBytes)
+
+proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 =
+  let asUint = uint64.fromBytes(bytes)
+  cast[int64](asUint)
+
+proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 =
+  bigint.truncate(int64)
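Illustrative usage, not part of the changeset: the new conversions round-trip a timestamp through bytes and UInt256 (a sketch; requires pkg/stint).

  let ts: SecondsSince1970 = 1_700_000_000
  assert ts.toBytes.toSecondsSince1970 == ts
  assert 1_700_000_000.u256.toSecondsSince1970 == ts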
codex/codex.nim (264 changed lines)
@@ -8,10 +8,11 @@
 ## those terms.

 import std/sequtils
+import std/strutils
 import std/os
-import std/sugar
+import std/tables
+import std/cpuinfo

-import pkg/chronicles
 import pkg/chronos
 import pkg/presto
 import pkg/libp2p

@@ -20,38 +21,140 @@ import pkg/confutils/defs
 import pkg/nitro
 import pkg/stew/io2
 import pkg/stew/shims/net as stewnet
+import pkg/datastore
+import pkg/ethers except Rng
+import pkg/stew/io2
+import pkg/taskpools

 import ./node
 import ./conf
 import ./rng
 import ./rest/api
 import ./stores
+import ./slots
 import ./blockexchange
 import ./utils/fileutils
 import ./erasure
 import ./discovery
 import ./contracts
-import ./utils/keyutils
+import ./systemclock
+import ./contracts/clock
+import ./contracts/deployment
 import ./utils/addrutils
+import ./namespaces
+import ./codextypes
+import ./logutils

 logScope:
   topics = "codex node"

 type
   CodexServer* = ref object
-    runHandle: Future[void]
     config: CodexConf
     restServer: RestServerRef
     codexNode: CodexNodeRef
+    repoStore: RepoStore
+    maintenance: BlockMaintainer
+    taskpool: Taskpool

   CodexPrivateKey* = libp2p.PrivateKey # alias
+  EthWallet = ethers.Wallet

+proc waitForSync(provider: Provider): Future[void] {.async.} =
+  var sleepTime = 1
+  trace "Checking sync state of Ethereum provider..."
+  while await provider.isSyncing:
+    notice "Waiting for Ethereum provider to sync..."
+    await sleepAsync(sleepTime.seconds)
+    if sleepTime < 10:
+      inc sleepTime
+  trace "Ethereum provider is synced."
+
+proc bootstrapInteractions(
+    s: CodexServer): Future[void] {.async.} =
+  ## bootstrap interactions and return contracts
+  ## using clients, hosts, validators pairings
+  ##
+  let
+    config = s.config
+    repo = s.repoStore
+
+  if config.persistence:
+    if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
+      error "Persistence enabled, but no Ethereum account was set"
+      quit QuitFailure
+
+    let provider = JsonRpcProvider.new(config.ethProvider)
+    await waitForSync(provider)
+    var signer: Signer
+    if account =? config.ethAccount:
+      signer = provider.getSigner(account)
+    elif keyFile =? config.ethPrivateKey:
+      without isSecure =? checkSecureFile(keyFile):
+        error "Could not check file permissions: does Ethereum private key file exist?"
+        quit QuitFailure
+      if not isSecure:
+        error "Ethereum private key file does not have safe file permissions"
+        quit QuitFailure
+      without key =? keyFile.readAllChars():
+        error "Unable to read Ethereum private key file"
+        quit QuitFailure
+      without wallet =? EthWallet.new(key.strip(), provider):
+        error "Invalid Ethereum private key in file"
+        quit QuitFailure
+      signer = wallet
+
+    let deploy = Deployment.new(provider, config)
+    without marketplaceAddress =? await deploy.address(Marketplace):
+      error "No Marketplace address was specified or there is no known address for the current network"
+      quit QuitFailure
+
+    let marketplace = Marketplace.new(marketplaceAddress, signer)
+    let market = OnChainMarket.new(marketplace)
+    let clock = OnChainClock.new(provider)
+
+    var client: ?ClientInteractions
+    var host: ?HostInteractions
+    var validator: ?ValidatorInteractions
+
+    if config.validator or config.persistence:
+      s.codexNode.clock = clock
+    else:
+      s.codexNode.clock = SystemClock()
+
+    if config.persistence:
+      # This is used for simulation purposes. Normal nodes won't be compiled with this flag
+      # and hence the proof failure will always be 0.
+      when codex_enable_proof_failures:
+        let proofFailures = config.simulateProofFailures
+        if proofFailures > 0:
+          warn "Enabling proof failure simulation!"
+      else:
+        let proofFailures = 0
+        if config.simulateProofFailures > 0:
+          warn "Proof failure simulation is not enabled for this build! Configuration ignored"
+
+      let purchasing = Purchasing.new(market, clock)
+      let sales = Sales.new(market, clock, repo, proofFailures)
+      client = some ClientInteractions.new(clock, purchasing)
+      host = some HostInteractions.new(clock, sales)
+
+    if config.validator:
+      let validation = Validation.new(clock, market, config.validatorMaxSlots)
+      validator = some ValidatorInteractions.new(clock, validation)
+
+    s.codexNode.contracts = (client, host, validator)

 proc start*(s: CodexServer) {.async.} =
-  s.restServer.start()
-  await s.codexNode.start()
+  trace "Starting codex node", config = $s.config
+
+  await s.repoStore.start()
+  s.maintenance.start()
+
+  await s.codexNode.switch.start()

   let
-    # TODO: Can't define this as constants, pity
+    # TODO: Can't define these as constants, pity
     natIpPart = MultiAddress.init("/ip4/" & $s.config.nat & "/")
       .expect("Should create multiaddress")
     anyAddrIp = MultiAddress.init("/ip4/0.0.0.0/")

@@ -75,32 +178,29 @@ proc start*(s: CodexServer) {.async.} =
   s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
   s.codexNode.discovery.updateDhtRecord(s.config.nat, s.config.discoveryPort)

-  s.runHandle = newFuture[void]("codex.runHandle")
-  await s.runHandle
+  await s.bootstrapInteractions()
+  await s.codexNode.start()
+  s.restServer.start()

 proc stop*(s: CodexServer) {.async.} =
+  notice "Stopping codex node"
+
+  s.taskpool.syncAll()
+  s.taskpool.shutdown()
+
   await allFuturesThrowing(
-    s.restServer.stop(), s.codexNode.stop())
-
-  s.runHandle.complete()
-
-proc new(_: type ContractInteractions, config: CodexConf): ?ContractInteractions =
-  if not config.persistence:
-    if config.ethAccount.isSome:
-      warn "Ethereum account was set, but persistence is not enabled"
-    return
-
-  without account =? config.ethAccount:
-    error "Persistence enabled, but no Ethereum account was set"
-    quit QuitFailure
-
-  if deployment =? config.ethDeployment:
-    ContractInteractions.new(config.ethProvider, account, deployment)
-  else:
-    ContractInteractions.new(config.ethProvider, account)
-
-proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey): T =
+    s.restServer.stop(),
+    s.codexNode.switch.stop(),
+    s.codexNode.stop(),
+    s.repoStore.stop(),
+    s.maintenance.stop())
+
+proc new*(
+    T: type CodexServer,
+    config: CodexConf,
+    privateKey: CodexPrivateKey): CodexServer =
+  ## create CodexServer including setting up datastore, repostore, etc
   let
     switch = SwitchBuilder
     .new()

@@ -118,13 +218,22 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):
   var
     cache: CacheStore = nil

-  if config.cacheSize > 0:
-    cache = CacheStore.new(cacheSize = config.cacheSize * MiB)
+  if config.cacheSize > 0'nb:
+    cache = CacheStore.new(cacheSize = config.cacheSize)
+    ## Is unused?

   let
-    discoveryStore = Datastore(SQLiteDatastore.new(
-      config.dataDir / "dht")
-      .expect("Should not fail!"))
+    discoveryDir = config.dataDir / CodexDhtNamespace
+
+  if io2.createPath(discoveryDir).isErr:
+    trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
+    raise (ref Defect)(
+      msg: "Unable to create discovery directory for block store: " & discoveryDir)
+
+  let
+    discoveryStore = Datastore(
+      LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
+        .expect("Should create discovery datastore!"))

     discovery = Discovery.new(
       switch.peerInfo.privateKey,

@@ -136,32 +245,85 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):

     wallet = WalletRef.new(EthPrivateKey.random())
     network = BlockExcNetwork.new(switch)
-    repoDir = config.dataDir / "repo"

-  if io2.createPath(repoDir).isErr:
-    trace "Unable to create data directory for block store", dataDir = repoDir
-    raise (ref Defect)(
-      msg: "Unable to create data directory for block store: " & repoDir)
+    repoData = case config.repoKind
+      of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
+        .expect("Should create repo file data store!"))
+      of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir)
+        .expect("Should create repo SQLite data store!"))
+      of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir)
+        .expect("Should create repo LevelDB data store!"))
+
+    repoStore = RepoStore.new(
+      repoDs = repoData,
+      metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace)
+        .expect("Should create metadata store!"),
+      quotaMaxBytes = config.storageQuota,
+      blockTtl = config.blockTtl)
+
+    maintenance = BlockMaintainer.new(
+      repoStore,
+      interval = config.blockMaintenanceInterval,
+      numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks)

-  let
-    localStore = FSStore.new(repoDir, cache = cache)
     peerStore = PeerCtxStore.new()
     pendingBlocks = PendingBlocksManager.new()
-    blockDiscovery = DiscoveryEngine.new(localStore, peerStore, network, discovery, pendingBlocks)
-    engine = BlockExcEngine.new(localStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
-    store = NetworkStore.new(engine, localStore)
-    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
-    contracts = ContractInteractions.new(config)
-    codexNode = CodexNodeRef.new(switch, store, engine, erasure, discovery, contracts)
+    blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
+    engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
+    store = NetworkStore.new(engine, repoStore)
+
+    prover = if config.prover:
+      if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and
+        endsWith($config.circomR1cs, ".r1cs"):
+        error "Circom R1CS file not accessible"
+        raise (ref Defect)(
+          msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
+
+      if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and
+        endsWith($config.circomWasm, ".wasm"):
+        error "Circom wasm file not accessible"
+        raise (ref Defect)(
+          msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)")
+
+      let zkey = if not config.circomNoZkey:
+        if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and
+          endsWith($config.circomZkey, ".zkey"):
+          error "Circom zkey file not accessible"
+          raise (ref Defect)(
+            msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)")
+
+        $config.circomZkey
+      else: ""
+
+      some Prover.new(
+        store,
+        CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey),
+        config.numProofSamples)
+    else:
+      none Prover
+
+    taskpool = Taskpool.new(num_threads = countProcessors())
+
+    codexNode = CodexNodeRef.new(
+      switch = switch,
+      networkStore = store,
+      engine = engine,
+      prover = prover,
+      discovery = discovery,
+      taskpool = taskpool)

     restServer = RestServerRef.new(
-      codexNode.initRestApi(config),
-      initTAddress("127.0.0.1" , config.apiPort),
+      codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
+      initTAddress(config.apiBindAddress , config.apiPort),
       bufferSize = (1024 * 64),
       maxRequestBodySize = int.high)
       .expect("Should start rest server!")

   switch.mount(network)
-  T(
+
+  CodexServer(
     config: config,
     codexNode: codexNode,
-    restServer: restServer)
+    restServer: restServer,
+    repoStore: repoStore,
+    maintenance: maintenance,
+    taskpool: taskpool)
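Illustrative usage, not part of the changeset: the intended server lifecycle after this refactor, assuming `config` was parsed by conf.nim and `privateKey` loaded elsewhere.

  let server = CodexServer.new(config, privateKey)
  await server.start()   # repo store, maintenance, switch, interactions, REST
  # ... serve until shutdown is requested ...
  await server.stop()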
@@ -0,0 +1,113 @@
+## Nim-Codex
+## Copyright (c) 2023 Status Research & Development GmbH
+## Licensed under either of
+##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+
+{.push raises: [].}
+
+import std/tables
+import std/sugar
+
+import pkg/libp2p/multicodec
+import pkg/libp2p/multihash
+import pkg/libp2p/cid
+import pkg/results
+import pkg/questionable/results
+
+import ./units
+import ./errors
+
+export tables
+
+const
+  # Size of blocks for storage / network exchange,
+  DefaultBlockSize* = NBytes 1024*64
+  DefaultCellSize* = NBytes 2048
+
+  # Proving defaults
+  DefaultMaxSlotDepth* = 32
+  DefaultMaxDatasetDepth* = 8
+  DefaultBlockDepth* = 5
+  DefaultCellElms* = 67
+  DefaultSamplesNum* = 5
+
+  # hashes
+  Sha256HashCodec* = multiCodec("sha2-256")
+  Sha512HashCodec* = multiCodec("sha2-512")
+  Pos2Bn128SpngCodec* = multiCodec("poseidon2-alt_bn_128-sponge-r2")
+  Pos2Bn128MrklCodec* = multiCodec("poseidon2-alt_bn_128-merkle-2kb")
+
+  ManifestCodec* = multiCodec("codex-manifest")
+  DatasetRootCodec* = multiCodec("codex-root")
+  BlockCodec* = multiCodec("codex-block")
+  SlotRootCodec* = multiCodec("codex-slot-root")
+  SlotProvingRootCodec* = multiCodec("codex-proving-root")
+  CodexSlotCellCodec* = multiCodec("codex-slot-cell")
+
+  CodexHashesCodecs* = [
+    Sha256HashCodec,
+    Pos2Bn128SpngCodec,
+    Pos2Bn128MrklCodec
+  ]
+
+  CodexPrimitivesCodecs* = [
+    ManifestCodec,
+    DatasetRootCodec,
+    BlockCodec,
+    SlotRootCodec,
+    SlotProvingRootCodec,
+    CodexSlotCellCodec,
+  ]
+
+proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
+  ## Initialize padding blocks table
+  ##
+  ## TODO: Ideally this is done at compile time, but for now
+  ## we do it at runtime because of an `importc` error that is
+  ## coming from somewhere in MultiHash that I can't track down.
+  ##
+
+  let
+    emptyData: seq[byte] = @[]
+    PadHashes = {
+      Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
+      Sha512HashCodec: ? MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
+    }.toTable
+
+  var
+    table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
+
+  for hcodec, mhash in PadHashes.pairs:
+    table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure
+
+  success table
+
+proc emptyCid*(
+    version: CidVersion,
+    hcodec: MultiCodec,
+    dcodec: MultiCodec): ?!Cid =
+  ## Returns cid representing empty content,
+  ## given cid version, hash codec and data codec
+  ##
+
+  var
+    table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
+
+  once:
+    table = ? initEmptyCidTable()
+
+  table[(version, hcodec, dcodec)].catch
+
+proc emptyDigest*(
+    version: CidVersion,
+    hcodec: MultiCodec,
+    dcodec: MultiCodec): ?!MultiHash =
+  ## Returns hash representing empty content,
+  ## given cid version, hash codec and data codec
+  ##
+  emptyCid(version, hcodec, dcodec)
+    .flatMap((cid: Cid) => cid.mhash.mapFailure)
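Illustrative usage, not part of the changeset: looking up the empty-content cid and digest for the default block codec (a sketch, unwrapping with tryGet).

  let
    cid = emptyCid(CIDv1, Sha256HashCodec, BlockCodec).tryGet()
    digest = emptyDigest(CIDv1, Sha256HashCodec, BlockCodec).tryGet()

  assert cid.mhash.tryGet() == digest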
codex/conf.nim (543 changed lines)
|
@ -7,9 +7,7 @@
|
||||||
## This file may not be copied, modified, or distributed except according to
|
## This file may not be copied, modified, or distributed except according to
|
||||||
## those terms.
|
## those terms.
|
||||||
|
|
||||||
import pkg/upraises
|
{.push raises: [].}
|
||||||
|
|
||||||
push: {.upraises: [].}
|
|
||||||
|
|
||||||
import std/os
|
import std/os
|
||||||
import std/terminal
|
import std/terminal
|
||||||
|
@ -17,37 +15,84 @@ import std/options
|
||||||
import std/strutils
|
import std/strutils
|
||||||
import std/typetraits
|
import std/typetraits
|
||||||
|
|
||||||
import pkg/chronicles
|
import pkg/chronos
|
||||||
import pkg/chronicles/helpers
|
import pkg/chronicles/helpers
|
||||||
import pkg/chronicles/topics_registry
|
import pkg/chronicles/topics_registry
|
||||||
import pkg/confutils/defs
|
import pkg/confutils/defs
|
||||||
import pkg/confutils/std/net
|
import pkg/confutils/std/net
|
||||||
|
import pkg/toml_serialization
|
||||||
import pkg/metrics
|
import pkg/metrics
|
||||||
import pkg/metrics/chronos_httpserver
|
import pkg/metrics/chronos_httpserver
|
||||||
import pkg/stew/shims/net as stewnet
|
import pkg/stew/shims/net as stewnet
|
||||||
|
import pkg/stew/shims/parseutils
|
||||||
|
import pkg/stew/byteutils
|
||||||
import pkg/libp2p
|
import pkg/libp2p
|
||||||
import pkg/ethers
|
import pkg/ethers
|
||||||
|
import pkg/questionable
|
||||||
|
import pkg/questionable/results
|
||||||
|
|
||||||
|
import ./codextypes
|
||||||
import ./discovery
|
import ./discovery
|
||||||
import ./stores/cachestore
|
import ./logutils
|
||||||
|
import ./stores
|
||||||
|
import ./units
|
||||||
|
import ./utils
|
||||||
|
|
||||||
export DefaultCacheSizeMiB, net
|
export units, net, codextypes, logutils
|
||||||
|
|
||||||
|
export
|
||||||
|
DefaultQuotaBytes,
|
||||||
|
DefaultBlockTtl,
|
||||||
|
DefaultBlockMaintenanceInterval,
|
||||||
|
DefaultNumberOfBlocksToMaintainPerInterval
|
||||||
|
|
||||||
|
proc defaultDataDir*(): string =
|
||||||
|
let dataDir = when defined(windows):
|
||||||
|
"AppData" / "Roaming" / "Codex"
|
||||||
|
elif defined(macosx):
|
||||||
|
"Library" / "Application Support" / "Codex"
|
||||||
|
else:
|
||||||
|
".cache" / "codex"
|
||||||
|
|
||||||
|
getHomeDir() / dataDir
|
||||||
|
|
||||||
|
const
|
||||||
|
codex_enable_api_debug_peers* {.booldefine.} = false
|
||||||
|
codex_enable_proof_failures* {.booldefine.} = false
|
||||||
|
codex_enable_log_counter* {.booldefine.} = false
|
||||||
|
|
||||||
|
DefaultDataDir* = defaultDataDir()
|
||||||
|
|
||||||
type
|
type
|
||||||
StartUpCommand* {.pure.} = enum
|
StartUpCmd* {.pure.} = enum
|
||||||
noCommand,
|
noCmd
|
||||||
initNode
|
persistence
|
||||||
|
|
||||||
LogKind* = enum
|
PersistenceCmd* {.pure.} = enum
|
||||||
|
noCmd
|
||||||
|
prover
|
||||||
|
|
||||||
|
LogKind* {.pure.} = enum
|
||||||
Auto = "auto"
|
Auto = "auto"
|
||||||
Colors = "colors"
|
Colors = "colors"
|
||||||
NoColors = "nocolors"
|
NoColors = "nocolors"
|
||||||
Json = "json"
|
Json = "json"
|
||||||
None = "none"
|
None = "none"
|
||||||
|
|
||||||
|
RepoKind* = enum
|
||||||
|
repoFS = "fs"
|
||||||
|
repoSQLite = "sqlite"
|
||||||
|
repoLevelDb = "leveldb"
|
||||||
|
|
||||||
CodexConf* = object
|
CodexConf* = object
|
||||||
|
configFile* {.
|
||||||
|
desc: "Loads the configuration from a TOML file"
|
||||||
|
defaultValueDesc: "none"
|
||||||
|
defaultValue: InputFile.none
|
||||||
|
name: "config-file" }: Option[InputFile]
|
||||||
|
|
||||||
logLevel* {.
|
logLevel* {.
|
||||||
defaultValue: "INFO"
|
defaultValue: "info"
|
||||||
desc: "Sets the log level",
|
desc: "Sets the log level",
|
||||||
name: "log-level" }: string
|
name: "log-level" }: string
|
||||||
|
|
||||||
|
@ -75,86 +120,132 @@ type
|
||||||
name: "metrics-port" }: Port
|
name: "metrics-port" }: Port
|
||||||
|
|
||||||
dataDir* {.
|
dataDir* {.
|
||||||
desc: "The directory where codex will store configuration and data."
|
desc: "The directory where codex will store configuration and data"
|
||||||
defaultValue: defaultDataDir()
|
defaultValue: DefaultDataDir
|
||||||
defaultValueDesc: ""
|
defaultValueDesc: $DefaultDataDir
|
||||||
abbr: "d"
|
abbr: "d"
|
||||||
name: "data-dir" }: OutDir
|
name: "data-dir" }: OutDir
|
||||||
|
|
||||||
|
listenAddrs* {.
|
||||||
|
desc: "Multi Addresses to listen on"
|
||||||
|
defaultValue: @[
|
||||||
|
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
|
||||||
|
.expect("Should init multiaddress")]
|
||||||
|
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
|
||||||
|
abbr: "i"
|
||||||
|
name: "listen-addrs" }: seq[MultiAddress]
|
||||||
|
|
||||||
|
# TODO: change this once we integrate nat support
|
||||||
|
nat* {.
|
||||||
|
desc: "IP Addresses to announce behind a NAT"
|
||||||
|
defaultValue: ValidIpAddress.init("127.0.0.1")
|
||||||
|
defaultValueDesc: "127.0.0.1"
|
||||||
|
abbr: "a"
|
||||||
|
name: "nat" }: ValidIpAddress
|
||||||
|
|
||||||
|
discoveryIp* {.
|
||||||
|
desc: "Discovery listen address"
|
||||||
|
defaultValue: ValidIpAddress.init(IPv4_any())
|
||||||
|
defaultValueDesc: "0.0.0.0"
|
||||||
|
abbr: "e"
|
||||||
|
name: "disc-ip" }: ValidIpAddress
|
||||||
|
|
||||||
|
discoveryPort* {.
|
||||||
|
desc: "Discovery (UDP) port"
|
||||||
|
defaultValue: 8090.Port
|
||||||
|
defaultValueDesc: "8090"
|
||||||
|
abbr: "u"
|
||||||
|
name: "disc-port" }: Port
|
||||||
|
|
||||||
|
netPrivKeyFile* {.
|
||||||
|
desc: "Source of network (secp256k1) private key file path or name"
|
||||||
|
defaultValue: "key"
|
||||||
|
name: "net-privkey" }: string
|
||||||
|
|
||||||
|
bootstrapNodes* {.
|
||||||
|
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
|
||||||
|
abbr: "b"
|
||||||
|
name: "bootstrap-node" }: seq[SignedPeerRecord]
|
||||||
|
|
||||||
|
maxPeers* {.
|
||||||
|
desc: "The maximum number of peers to connect to"
|
||||||
|
defaultValue: 160
|
||||||
|
name: "max-peers" }: int
|
||||||
|
|
||||||
|
agentString* {.
|
||||||
|
defaultValue: "Codex"
|
||||||
|
desc: "Node agent string which is used as identifier in network"
|
||||||
|
name: "agent-string" }: string
|
||||||
|
|
||||||
|
apiBindAddress* {.
|
||||||
|
desc: "The REST API bind address"
|
||||||
|
defaultValue: "127.0.0.1"
|
||||||
|
name: "api-bindaddr"
|
||||||
|
}: string
|
||||||
|
|
||||||
|
apiPort* {.
|
||||||
|
desc: "The REST Api port",
|
||||||
|
defaultValue: 8080.Port
|
||||||
|
defaultValueDesc: "8080"
|
||||||
|
name: "api-port"
|
||||||
|
abbr: "p" }: Port
|
+    apiCorsAllowedOrigin* {.
+      desc: "The REST Api CORS allowed origin for downloading data. '*' will allow all origins, '' will allow none.",
+      defaultValue: string.none
+      defaultValueDesc: "Disallow all cross origin requests to download data"
+      name: "api-cors-origin" }: Option[string]
+
+    repoKind* {.
+      desc: "Backend for main repo store (fs, sqlite, leveldb)"
+      defaultValueDesc: "fs"
+      defaultValue: repoFS
+      name: "repo-kind" }: RepoKind
+
+    storageQuota* {.
+      desc: "The size of the total storage quota dedicated to the node"
+      defaultValue: DefaultQuotaBytes
+      defaultValueDesc: $DefaultQuotaBytes
+      name: "storage-quota"
+      abbr: "q" }: NBytes
+
+    blockTtl* {.
+      desc: "Default block timeout in seconds - 0 disables the ttl"
+      defaultValue: DefaultBlockTtl
+      defaultValueDesc: $DefaultBlockTtl
+      name: "block-ttl"
+      abbr: "t" }: Duration
+
+    blockMaintenanceInterval* {.
+      desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
+      defaultValue: DefaultBlockMaintenanceInterval
+      defaultValueDesc: $DefaultBlockMaintenanceInterval
+      name: "block-mi" }: Duration
+
+    blockMaintenanceNumberOfBlocks* {.
+      desc: "Number of blocks to check every maintenance cycle"
+      defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
+      defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
+      name: "block-mn" }: int
+
+    cacheSize* {.
+      desc: "The size of the block cache, 0 disables the cache - might help on slow hard drives"
+      defaultValue: 0
+      defaultValueDesc: "0"
+      name: "cache-size"
+      abbr: "c" }: NBytes
+
+    logFile* {.
+      desc: "Logs to file"
+      defaultValue: string.none
+      name: "log-file"
+      hidden
+    .}: Option[string]
+
   case cmd* {.
-    command
-    defaultValue: noCommand }: StartUpCommand
+    defaultValue: noCmd
+    command }: StartUpCmd

-  of noCommand:
-    listenAddrs* {.
-      desc: "Multi Addresses to listen on"
-      defaultValue: @[
-        MultiAddress.init("/ip4/0.0.0.0/tcp/0")
-        .expect("Should init multiaddress")]
-      defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
-      abbr: "i"
-      name: "listen-addrs" }: seq[MultiAddress]
-
-    nat* {.
-      # TODO: change this once we integrate nat support
-      desc: "IP Addresses to announce behind a NAT"
-      defaultValue: ValidIpAddress.init("127.0.0.1")
-      defaultValueDesc: "127.0.0.1"
-      abbr: "a"
-      name: "nat" }: ValidIpAddress
-
-    discoveryIp* {.
-      desc: "Discovery listen address"
-      defaultValue: ValidIpAddress.init(IPv4_any())
-      defaultValueDesc: "0.0.0.0"
-      name: "disc-ip" }: ValidIpAddress
-
-    discoveryPort* {.
-      desc: "Discovery (UDP) port"
-      defaultValue: Port(8090)
-      defaultValueDesc: "8090"
-      name: "disc-port" }: Port
-
-    netPrivKeyFile* {.
-      desc: "Source of network (secp256k1) private key file path or name"
-      defaultValue: "key"
-      name: "net-privkey" }: string
-
-    bootstrapNodes* {.
-      desc: "Specifies one or more bootstrap nodes to use when connecting to the network."
-      abbr: "b"
-      name: "bootstrap-node" }: seq[SignedPeerRecord]
-
-    maxPeers* {.
-      desc: "The maximum number of peers to connect to"
-      defaultValue: 160
-      name: "max-peers" }: int
-
-    agentString* {.
-      defaultValue: "Codex"
-      desc: "Node agent string which is used as identifier in network"
-      name: "agent-string" }: string
-
-    apiPort* {.
-      desc: "The REST Api port",
-      defaultValue: 8080
-      defaultValueDesc: "8080"
-      name: "api-port"
-      abbr: "p" }: int
-
-    cacheSize* {.
-      desc: "The size in MiB of the block cache, 0 disables the cache - might help on slow hardrives"
-      defaultValue: 0
-      defaultValueDesc: "0"
-      name: "cache-size"
-      abbr: "c" }: Natural
-
-    persistence* {.
-      desc: "Enables persistence mechanism, requires an Ethereum node"
-      defaultValue: false
-      name: "persistence"
-    .}: bool
+  of persistence:

     ethProvider* {.
       desc: "The URL of the JSON-RPC API of the Ethereum node"
       defaultValue: "ws://localhost:8545"
@@ -164,66 +255,244 @@ type
     ethAccount* {.
       desc: "The Ethereum account that is used for storage contracts"
       defaultValue: EthAddress.none
+      defaultValueDesc: ""
       name: "eth-account"
     .}: Option[EthAddress]

-    ethDeployment* {.
-      desc: "The json file describing the contract deployment"
+    ethPrivateKey* {.
+      desc: "File containing Ethereum private key for storage contracts"
       defaultValue: string.none
-      name: "eth-deployment"
+      defaultValueDesc: ""
+      name: "eth-private-key"
     .}: Option[string]

-  of initNode:
-    discard
+    marketplaceAddress* {.
+      desc: "Address of deployed Marketplace contract"
+      defaultValue: EthAddress.none
+      defaultValueDesc: ""
+      name: "marketplace-address"
+    .}: Option[EthAddress]
+
+    # TODO: should go behind a feature flag
+    simulateProofFailures* {.
+      desc: "Simulates proof failures once every N proofs. 0 = disabled."
+      defaultValue: 0
+      name: "simulate-proof-failures"
+      hidden
+    .}: int
+
+    validator* {.
+      desc: "Enables validator, requires an Ethereum node"
+      defaultValue: false
+      name: "validator"
+    .}: bool
+
+    validatorMaxSlots* {.
+      desc: "Maximum number of slots that the validator monitors"
+      defaultValue: 1000
+      name: "validator-max-slots"
+    .}: int
+
+    case persistenceCmd* {.
+      defaultValue: noCmd
+      command }: PersistenceCmd
+
+    of PersistenceCmd.prover:
+      circomR1cs* {.
+        desc: "The r1cs file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
+        name: "circom-r1cs"
+      .}: InputFile
+
+      circomWasm* {.
+        desc: "The wasm file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
+        name: "circom-wasm"
+      .}: InputFile
+
+      circomZkey* {.
+        desc: "The zkey file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
+        name: "circom-zkey"
+      .}: InputFile
+
+      # TODO: should probably be hidden and behind a feature flag
+      circomNoZkey* {.
+        desc: "Ignore the zkey file - use only for testing!"
+        defaultValue: false
+        name: "circom-no-zkey"
+      .}: bool
+
+      numProofSamples* {.
+        desc: "Number of samples to prove"
+        defaultValue: DefaultSamplesNum
+        defaultValueDesc: $DefaultSamplesNum
+        name: "proof-samples" }: int
+
+      maxSlotDepth* {.
+        desc: "The maximum depth of the slot tree"
+        defaultValue: DefaultMaxSlotDepth
+        defaultValueDesc: $DefaultMaxSlotDepth
+        name: "max-slot-depth" }: int
+
+      maxDatasetDepth* {.
+        desc: "The maximum depth of the dataset tree"
+        defaultValue: DefaultMaxDatasetDepth
+        defaultValueDesc: $DefaultMaxDatasetDepth
+        name: "max-dataset-depth" }: int
+
+      maxBlockDepth* {.
+        desc: "The maximum depth of the network block merkle tree"
+        defaultValue: DefaultBlockDepth
+        defaultValueDesc: $DefaultBlockDepth
+        name: "max-block-depth" }: int
+
+      maxCellElms* {.
+        desc: "The maximum number of elements in a cell"
+        defaultValue: DefaultCellElms
+        defaultValueDesc: $DefaultCellElms
+        name: "max-cell-elements" }: int
+
+    of PersistenceCmd.noCmd:
+      discard
+
+  of StartUpCmd.noCmd:
+    discard # end of persistence
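Editor's note: the `case cmd`/`case persistenceCmd` fields above give the binary nested subcommands; the `of persistence:` options only exist after `codex persistence`, and the prover options only after `codex persistence prover`. A minimal self-contained sketch of the same confutils pattern (type and option names here are illustrative, not from this codebase):

```nim
import pkg/confutils

type
  DemoCmd = enum
    noCmd
    persistence

  DemoConf = object
    logLevel {.
      desc: "Sets the log level"
      defaultValue: "info"
      name: "log-level" }: string

    case cmd {.
      defaultValue: noCmd
      command }: DemoCmd
    of persistence:
      ethProvider {.
        desc: "The URL of the JSON-RPC API of the Ethereum node"
        defaultValue: "ws://localhost:8545"
        name: "eth-provider" }: string
    of noCmd:
      discard

# `demo persistence --eth-provider=ws://host:8545` selects the branch;
# a bare `demo` leaves cmd == noCmd.
when isMainModule:
  let conf = DemoConf.load()
  echo conf.logLevel
```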
   EthAddress* = ethers.Address
+
+logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
+logutils.formatIt(LogFormat.json, EthAddress): %it
+
+func persistence*(self: CodexConf): bool =
+  self.cmd == StartUpCmd.persistence
+
+func prover*(self: CodexConf): bool =
+  self.persistence and self.persistenceCmd == PersistenceCmd.prover
+
+proc getCodexVersion(): string =
+  let tag = strip(staticExec("git tag"))
+  if tag.isEmptyOrWhitespace:
+    return "untagged build"
+  return tag
+
+proc getCodexRevision(): string =
+  # using a slice in a static context breaks nimsuggest for some reason
+  var res = strip(staticExec("git rev-parse --short HEAD"))
+  return res
+
+proc getNimBanner(): string =
+  staticExec("nim --version | grep Version")
 const
-  gitRevision* = strip(staticExec("git rev-parse --short HEAD"))[0..5]
-  nimBanner* = staticExec("nim --version | grep Version")
-
-  #TODO add versionMajor, Minor & Fix when we switch to semver
-  codexVersion* = gitRevision
+  codexVersion* = getCodexVersion()
+  codexRevision* = getCodexRevision()
+  nimBanner* = getNimBanner()

   codexFullVersion* =
-    "Codex build " & codexVersion & "\p" &
+    "Codex version: " & codexVersion & "\p" &
+    "Codex revision: " & codexRevision & "\p" &
     nimBanner
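Editor's note: because both helpers run through `staticExec`, version and revision are fixed at build time, not at run time; a binary keeps reporting the commit it was compiled from. A sketch of the expected banner shape (values illustrative):

```nim
# Prints something like:
#   Codex version: v0.1.0        (or "untagged build")
#   Codex revision: 2fb7031
#   Nim Compiler Version 1.6.x ...
echo codexFullVersion
```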
-proc defaultDataDir*(): string =
-  let dataDir = when defined(windows):
-    "AppData" / "Roaming" / "Codex"
-  elif defined(macosx):
-    "Library" / "Application Support" / "Codex"
-  else:
-    ".cache" / "codex"
-
-  getHomeDir() / dataDir
-
-func parseCmdArg*(T: type MultiAddress, input: TaintedString): T
-    {.raises: [ValueError, LPError, Defect].} =
-  MultiAddress.init($input).tryGet()
-
-proc parseCmdArg*(T: type SignedPeerRecord, uri: TaintedString): T =
+proc parseCmdArg*(T: typedesc[MultiAddress],
+                  input: string): MultiAddress
+                 {.upraises: [ValueError, LPError].} =
+  var ma: MultiAddress
+  let res = MultiAddress.init(input)
+  if res.isOk:
+    ma = res.get()
+  else:
+    warn "Invalid MultiAddress", input=input, error = res.error()
+    quit QuitFailure
+  ma
+
+proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
   var res: SignedPeerRecord
   try:
     if not res.fromURI(uri):
-      warn "Invalid SignedPeerRecord uri", uri=uri
+      warn "Invalid SignedPeerRecord uri", uri = uri
       quit QuitFailure
   except CatchableError as exc:
-    warn "Invalid SignedPeerRecord uri", uri=uri, error=exc.msg
+    warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
     quit QuitFailure
   res

-func parseCmdArg*(T: type EthAddress, address: TaintedString): T =
+proc parseCmdArg*(T: type EthAddress, address: string): T =
   EthAddress.init($address).get()
+proc parseCmdArg*(T: type NBytes, val: string): T =
+  var num = 0'i64
+  let count = parseSize(val, num, alwaysBin = true)
+  if count == 0:
+    warn "Invalid number of bytes", nbytes = val
+    quit QuitFailure
+  NBytes(num)
+
+proc parseCmdArg*(T: type Duration, val: string): T =
+  var dur: Duration
+  let count = parseDuration(val, dur)
+  if count == 0:
+    warn "Cannot parse duration", dur = dur
+    quit QuitFailure
+  dur
+
+proc readValue*(r: var TomlReader, val: var EthAddress)
+               {.upraises: [SerializationError, IOError].} =
+  val = EthAddress.init(r.readValue(string)).get()
+
+proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
+  without uri =? r.readValue(string).catch, err:
+    error "invalid SignedPeerRecord configuration value", error = err.msg
+    quit QuitFailure
+
+  val = SignedPeerRecord.parseCmdArg(uri)
+
+proc readValue*(r: var TomlReader, val: var MultiAddress) =
+  without input =? r.readValue(string).catch, err:
+    error "invalid MultiAddress configuration value", error = err.msg
+    quit QuitFailure
+
+  let res = MultiAddress.init(input)
+  if res.isOk:
+    val = res.get()
+  else:
+    warn "Invalid MultiAddress", input=input, error=res.error()
+    quit QuitFailure
+
+proc readValue*(r: var TomlReader, val: var NBytes)
+               {.upraises: [SerializationError, IOError].} =
+  var value = 0'i64
+  var str = r.readValue(string)
+  let count = parseSize(str, value, alwaysBin = true)
+  if count == 0:
+    error "invalid number of bytes for configuration value", value = str
+    quit QuitFailure
+  val = NBytes(value)
+
+proc readValue*(r: var TomlReader, val: var Duration)
+               {.upraises: [SerializationError, IOError].} =
+  var str = r.readValue(string)
+  var dur: Duration
+  let count = parseDuration(str, dur)
+  if count == 0:
+    error "Invalid duration parse", value = str
+    quit QuitFailure
+  val = dur
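Editor's note: these two hook families cover the two ways an option value arrives. `parseCmdArg` is the confutils entry point for command-line strings, and `readValue` is the toml-serialization entry point for the same options in a config file. A minimal sketch with a hypothetical `Percentage` type showing how one parser can back both paths:

```nim
import std/strutils
import pkg/toml_serialization

type Percentage* = distinct int  # hypothetical option type

# Command-line path: confutils hands over the raw string.
proc parseCmdArg*(T: type Percentage, val: string): T =
  let num = parseInt(val.strip(chars = {'%'}))
  if num < 0 or num > 100:
    quit "percentage out of range: " & val
  Percentage(num)

# Config-file path: reuse the command-line parser on the TOML string.
proc readValue*(r: var TomlReader, val: var Percentage) =
  val = Percentage.parseCmdArg(r.readValue(string))
```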
 # no idea why confutils needs this:
-proc completeCmdArg*(T: type EthAddress; val: TaintedString): seq[string] =
+proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
+  discard
+
+proc completeCmdArg*(T: type NBytes; val: string): seq[string] =
+  discard
+
+proc completeCmdArg*(T: type Duration; val: string): seq[string] =
   discard
 # silly chronicles, colors is a compile-time property
-proc stripAnsi(v: string): string =
+proc stripAnsi*(v: string): string =
   var
     res = newStringOfCap(v.len)
     i: int
@@ -258,13 +527,13 @@ proc stripAnsi(v: string): string =
   res

-proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
+proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
   # Updates log levels (without clearing old ones)
   let directives = logLevel.split(";")
   try:
-    setLogLevel(parseEnum[LogLevel](directives[0]))
+    setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii))
   except ValueError:
-    raise (ref ValueError)(msg: "Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL")
+    raise (ref ValueError)(msg: "Please specify one of: trace, debug, info, notice, warn, error or fatal")

   if directives.len > 1:
     for topicName, settings in parseTopicDirectives(directives[1..^1]):
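Editor's note: the accepted directive string is semicolon-separated; the first entry sets the global level (now case-insensitive thanks to `toUpperAscii`), and any remaining entries are chronicles topic directives handled by `parseTopicDirectives`. For example:

```nim
# Global level plus a per-topic override; the exact topic syntax is
# whatever chronicles' parseTopicDirectives accepts.
updateLogLevel("info;TRACE: marketplace")
```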
@@ -272,9 +541,10 @@ proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
       warn "Unrecognized logging topic", topic = topicName

 proc setupLogging*(conf: CodexConf) =
-  when defaultChroniclesStream.outputs.type.arity != 2:
+  when defaultChroniclesStream.outputs.type.arity != 3:
     warn "Logging configuration options not enabled in the current build"
   else:
+    var logFile: ?IoHandle
     proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
     proc writeAndFlush(f: File, msg: LogOutputStr) =
       try:
@@ -289,9 +559,28 @@ proc setupLogging*(conf: CodexConf) =
     proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) =
       writeAndFlush(stdout, stripAnsi(msg))

+    proc fileFlush(logLevel: LogLevel, msg: LogOutputStr) =
+      if file =? logFile:
+        if error =? file.writeFile(stripAnsi(msg).toBytes).errorOption:
+          error "failed to write to log file", errorCode = $error
+
+    defaultChroniclesStream.outputs[2].writer = noOutput
+    if logFilePath =? conf.logFile and logFilePath.len > 0:
+      let logFileHandle = openFile(
+        logFilePath,
+        {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
+      )
+      if logFileHandle.isErr:
+        error "failed to open log file",
+          path = logFilePath,
+          errorCode = $logFileHandle.error
+      else:
+        logFile = logFileHandle.option
+        defaultChroniclesStream.outputs[2].writer = fileFlush
+
     defaultChroniclesStream.outputs[1].writer = noOutput

-    defaultChroniclesStream.outputs[0].writer =
+    let writer =
       case conf.logFormat:
       of LogKind.Auto:
         if isatty(stdout):
@@ -306,6 +595,16 @@ proc setupLogging*(conf: CodexConf) =
       of LogKind.None:
         noOutput

+    when codex_enable_log_counter:
+      var counter = 0.uint64
+      proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
+        inc(counter)
+        let withoutNewLine = msg[0..^2]
+        writer(logLevel, withoutNewLine & " count=" & $counter & "\n")
+      defaultChroniclesStream.outputs[0].writer = numberedWriter
+    else:
+      defaultChroniclesStream.outputs[0].writer = writer
+
     try:
       updateLogLevel(conf.logLevel)
     except ValueError as err:
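Editor's note: the counter branch wraps whichever writer was chosen above so that every emitted line carries a monotonically increasing `count=` field, which makes dropped or interleaved log lines easy to spot (`codex_enable_log_counter` is assumed to be a compile-time define). The decoration pattern in isolation, as a sketch:

```nim
type Writer = proc(logLevel: LogLevel, msg: LogOutputStr)

proc withCounter(writer: Writer): Writer =
  # Close over a counter and append it to every line before delegating.
  var counter = 0'u64
  result = proc(logLevel: LogLevel, msg: LogOutputStr) =
    inc counter
    writer(logLevel, msg[0..^2] & " count=" & $counter & "\n")
```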
@@ -1,13 +1,9 @@
 import contracts/requests
-import contracts/storage
-import contracts/deployment
+import contracts/marketplace
 import contracts/market
-import contracts/proofs
 import contracts/interactions

 export requests
-export storage
-export deployment
+export marketplace
 export market
-export proofs
 export interactions
@@ -20,7 +20,7 @@ import ethers

 let address = # fill in address where the contract was deployed
 let provider = JsonRpcProvider.new("ws://localhost:8545")
-let storage = Storage.new(address, provider)
+let marketplace = Marketplace.new(address, provider)
 ```

 Setup client and host so that they can sign transactions; here we use the first
@@ -32,36 +32,6 @@ let client = provider.getSigner(accounts[0])
 let host = provider.getSigner(accounts[1])
 ```

-Collateral
-----------
-
-Hosts need to put up collateral before participating in storage contracts.
-
-A host can learn about the amount of collateral that is required:
-```nim
-let collateralAmount = await storage.collateralAmount()
-```
-
-The host then needs to prepare a payment to the smart contract by calling the
-`approve` method on the [ERC20 token][2]. Note that interaction with ERC20
-contracts is not part of this library.
-
-After preparing the payment, the host can deposit collateral:
-```nim
-await storage
-  .connect(host)
-  .deposit(collateralAmount)
-```
-
-When a host is not participating in storage offers or contracts, it can withdraw
-its collateral:
-
-```
-await storage
-  .connect(host)
-  .withdraw()
-```
-
 Storage requests
 ----------------
@@ -82,9 +52,7 @@ let request : StorageRequest = (

 When a client wants to submit this request to the network, it needs to pay the
 maximum price to the smart contract in advance. The difference between the
-maximum price and the offered price will be reimbursed later. To prepare, the
-client needs to call the `approve` method on the [ERC20 token][2]. Note that
-interaction with ERC20 contracts is not part of this library.
+maximum price and the offered price will be reimbursed later.

 Once the payment has been prepared, the client can submit the request to the
 network:
@@ -151,7 +119,7 @@ Storage proofs
 Time is divided into periods, and each period a storage proof may be required
 from the host. The odds of requiring a storage proof are negotiated through the
 storage request. For more details about the timing of storage proofs, please
-refer to the [design document][3].
+refer to the [design document][2].

 At the start of each period of time, the host can check whether a storage proof
 is required:
@@ -176,6 +144,5 @@ await storage
   .markProofAsMissing(id, period)
 ```

-[1]: https://github.com/status-im/dagger-contracts/
-[2]: https://ethereum.org/en/developers/docs/standards/tokens/erc-20/
-[3]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
+[1]: https://github.com/status-im/codex-contracts-eth/
+[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
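Editor's note: to make the period flow the README describes concrete, here is a hedged sketch of the host side: poll once per period and answer required proofs. `marketplace`, `slotId` and a prepared `proof` are assumed to exist as in the README examples; the loop itself is illustrative, not a library API:

```nim
import pkg/chronos

proc proofLoop(marketplace: Marketplace,
               slotId: SlotId,
               proof: Groth16Proof) {.async.} =
  while true:
    # each period, ask the contract whether a proof is due for our slot
    if await marketplace.isProofRequired(slotId):
      discard await marketplace.submitProof(slotId, proof)
    await sleepAsync(10.seconds)  # roughly one period; tune to the config
```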
@@ -3,41 +3,69 @@ import pkg/ethers
 import pkg/chronos
 import pkg/stint
 import ../clock
+import ../conf

 export clock

+logScope:
+  topics = "contracts clock"
+
 type
   OnChainClock* = ref object of Clock
     provider: Provider
     subscription: Subscription
     offset: times.Duration
+    blockNumber: UInt256
     started: bool
+    newBlock: AsyncEvent

 proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
-  OnChainClock(provider: provider)
+  OnChainClock(provider: provider, newBlock: newAsyncEvent())

-proc start*(clock: OnChainClock) {.async.} =
-  if clock.started:
-    return
-  clock.started = true
-
-  proc onBlock(blck: Block) {.async, upraises:[].} =
-    let blockTime = initTime(blck.timestamp.truncate(int64), 0)
-    let computerTime = getTime()
-    clock.offset = blockTime - computerTime
-
-  if latestBlock =? (await clock.provider.getBlock(BlockTag.latest)):
-    await onBlock(latestBlock)
+proc update(clock: OnChainClock, blck: Block) =
+  if number =? blck.number and number > clock.blockNumber:
+    let blockTime = initTime(blck.timestamp.truncate(int64), 0)
+    let computerTime = getTime()
+    clock.offset = blockTime - computerTime
+    clock.blockNumber = number
+    trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
+    clock.newBlock.fire()
+
+proc update(clock: OnChainClock) {.async.} =
+  try:
+    if latest =? (await clock.provider.getBlock(BlockTag.latest)):
+      clock.update(latest)
+  except CancelledError as error:
+    raise error
+  except CatchableError as error:
+    debug "error updating clock: ", error=error.msg
+    discard
+
+method start*(clock: OnChainClock) {.async.} =
+  if clock.started:
+    return
+
+  proc onBlock(_: Block) =
+    # ignore block parameter; hardhat may call this with pending blocks
+    asyncSpawn clock.update()
+
+  await clock.update()

   clock.subscription = await clock.provider.subscribe(onBlock)
+  clock.started = true

-proc stop*(clock: OnChainClock) {.async.} =
+method stop*(clock: OnChainClock) {.async.} =
   if not clock.started:
     return
-  clock.started = false

   await clock.subscription.unsubscribe()
+  clock.started = false

 method now*(clock: OnChainClock): SecondsSince1970 =
   doAssert clock.started, "clock should be started before calling now()"
-  toUnix(getTime() + clock.offset)
+  return toUnix(getTime() + clock.offset)
+
+method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
+  while (let difference = time - clock.now(); difference > 0):
+    clock.newBlock.clear()
+    discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))
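Editor's note: a hedged usage sketch of the reworked clock. `waitUntil` now sleeps on the `newBlock` event with a timeout instead of polling, so it wakes promptly when a block moves on-chain time forward (names as in the diff above; the surrounding proc is illustrative):

```nim
proc example(provider: Provider) {.async.} =
  let clock = OnChainClock.new(provider)
  await clock.start()
  let deadline = clock.now() + 60  # one minute of on-chain time
  await clock.waitUntil(deadline)  # wakes on new blocks or on timeout
  await clock.stop()
```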
@@ -0,0 +1,78 @@
+import pkg/contractabi
+import pkg/ethers/fields
+import pkg/questionable/results
+
+export contractabi
+
+type
+  MarketplaceConfig* = object
+    collateral*: CollateralConfig
+    proofs*: ProofConfig
+
+  CollateralConfig* = object
+    repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed
+    maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
+    slashCriterion*: uint16 # amount of proofs missed that lead to slashing
+    slashPercentage*: uint8 # percentage of the collateral that is slashed
+
+  ProofConfig* = object
+    period*: UInt256 # proofs requirements are calculated per period (in seconds)
+    timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
+    downtime*: uint8 # ignore this much recent blocks for proof requirements
+    zkeyHash*: string # hash of the zkey file which is linked to the verifier
+    # Ensures the pointer does not remain in downtime for many consecutive
+    # periods. For each period increase, move the pointer `pointerProduct`
+    # blocks. Should be a prime number to ensure there are no cycles.
+    downtimeProduct*: uint8
+
+func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
+  ProofConfig(
+    period: tupl[0],
+    timeout: tupl[1],
+    downtime: tupl[2],
+    zkeyHash: tupl[3],
+    downtimeProduct: tupl[4]
+  )
+
+func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
+  CollateralConfig(
+    repairRewardPercentage: tupl[0],
+    maxNumberOfSlashes: tupl[1],
+    slashCriterion: tupl[2],
+    slashPercentage: tupl[3]
+  )
+
+func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
+  MarketplaceConfig(
+    collateral: tupl[0],
+    proofs: tupl[1]
+  )
+
+func solidityType*(_: type ProofConfig): string =
+  solidityType(ProofConfig.fieldTypes)
+
+func solidityType*(_: type CollateralConfig): string =
+  solidityType(CollateralConfig.fieldTypes)
+
+func solidityType*(_: type MarketplaceConfig): string =
+  solidityType(MarketplaceConfig.fieldTypes)
+
+func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
+  encoder.write(slot.fieldValues)
+
+func encode*(encoder: var AbiEncoder, slot: CollateralConfig) =
+  encoder.write(slot.fieldValues)
+
+func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) =
+  encoder.write(slot.fieldValues)
+
+func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
+  let tupl = ?decoder.read(ProofConfig.fieldTypes)
+  success ProofConfig.fromTuple(tupl)
+
+func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
+  let tupl = ?decoder.read(CollateralConfig.fieldTypes)
+  success CollateralConfig.fromTuple(tupl)
+
+func decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T =
+  let tupl = ?decoder.read(MarketplaceConfig.fieldTypes)
+  success MarketplaceConfig.fromTuple(tupl)
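Editor's note: these encoders and decoders let the `config()` contract view (declared further down in this diff) return a structured value instead of raw tuples. A hedged sketch of how the decoded object is typically consumed, mirroring what the market module does later in this diff:

```nim
proc showProofConfig(marketplace: Marketplace) {.async.} =
  let config = await marketplace.config()  # ABI-decoded via decode() above
  echo "proof period (s): ", config.proofs.period
  echo "proof timeout (s): ", config.proofs.timeout
  echo "zkey hash: ", config.proofs.zkeyHash
```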
@@ -1,26 +1,43 @@
-import std/json
 import std/os
+import std/tables
 import pkg/ethers
 import pkg/questionable

-type Deployment* = object
-  json: JsonNode
+import ../conf
+import ../logutils
+import ./marketplace

-const defaultFile = "vendor" / "dagger-contracts" / "deployment-localhost.json"
+type Deployment* = ref object
+  provider: Provider
+  config: CodexConf

-## Reads deployment information from a json file. It expects a file that has
-## been exported with Hardhat deploy.
-## See also:
-## https://github.com/wighawag/hardhat-deploy/tree/master#6-hardhat-export
-proc deployment*(file = defaultFile): Deployment =
-  Deployment(json: parseFile(file))
+const knownAddresses = {
+  # Hardhat localhost network
+  "31337": {
+    "Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"),
+  }.toTable,
+  # Taiko Alpha-3 Testnet
+  "167005": {
+    "Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")
+  }.toTable
+}.toTable

-proc address*(deployment: Deployment, Contract: typedesc): ?Address =
-  if deployment.json == nil:
-    return none Address
-
-  try:
-    let address = deployment.json["contracts"][$Contract]["address"].getStr()
-    Address.init(address)
-  except KeyError:
-    none Address
+proc getKnownAddress(T: type, chainId: UInt256): ?Address =
+  let id = chainId.toString(10)
+  notice "Looking for well-known contract address with ChainID ", chainId=id
+
+  if not (id in knownAddresses):
+    return none Address
+
+  return knownAddresses[id].getOrDefault($T, Address.none)
+
+proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
+  Deployment(provider: provider, config: config)
+
+proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
+  when contract is Marketplace:
+    if address =? deployment.config.marketplaceAddress:
+      return some address
+
+  let chainId = await deployment.provider.getChainId()
+  return contract.getKnownAddress(chainId)
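Editor's note: resolution order in the new `address` proc is worth spelling out: an explicit `--marketplace-address` from the config wins; otherwise the provider's chain id is looked up in the table of well-known deployments. A hedged usage sketch:

```nim
proc resolveMarketplace(provider: Provider, config: CodexConf) {.async.} =
  let deployment = Deployment.new(provider, config)
  without address =? await deployment.address(Marketplace):
    echo "no known Marketplace deployment for this network"
    return
  echo "using Marketplace at ", address
```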
@@ -1,78 +1,9 @@
-import pkg/ethers
-import pkg/chronicles
-import ../purchasing
-import ../sales
-import ../proving
-import ./deployment
-import ./storage
-import ./market
-import ./proofs
-import ./clock
+import ./interactions/interactions
+import ./interactions/hostinteractions
+import ./interactions/clientinteractions
+import ./interactions/validatorinteractions

-export purchasing
-export sales
-export proving
-export chronicles
+export interactions
+export hostinteractions
+export clientinteractions
+export validatorinteractions

-type
-  ContractInteractions* = ref object
-    purchasing*: Purchasing
-    sales*: Sales
-    proving*: Proving
-    clock: OnChainClock
-
-proc new*(_: type ContractInteractions,
-          signer: Signer,
-          deployment: Deployment): ?ContractInteractions =
-
-  without address =? deployment.address(Storage):
-    error "Unable to determine address of the Storage smart contract"
-    return none ContractInteractions
-
-  let contract = Storage.new(address, signer)
-  let market = OnChainMarket.new(contract)
-  let proofs = OnChainProofs.new(contract)
-  let clock = OnChainClock.new(signer.provider)
-  let proving = Proving.new(proofs, clock)
-  some ContractInteractions(
-    purchasing: Purchasing.new(market, clock),
-    sales: Sales.new(market, clock, proving),
-    proving: proving,
-    clock: clock
-  )
-
-proc new*(_: type ContractInteractions,
-          providerUrl: string,
-          account: Address,
-          deploymentFile: string = string.default): ?ContractInteractions =
-
-  let provider = JsonRpcProvider.new(providerUrl)
-  let signer = provider.getSigner(account)
-
-  var deploy: Deployment
-  try:
-    if deploymentFile == string.default:
-      deploy = deployment()
-    else:
-      deploy = deployment(deploymentFile)
-  except IOError as e:
-    error "Unable to read deployment json", msg = e.msg
-    return none ContractInteractions
-
-  ContractInteractions.new(signer, deploy)
-
-proc new*(_: type ContractInteractions,
-          account: Address): ?ContractInteractions =
-  ContractInteractions.new("ws://localhost:8545", account)
-
-proc start*(interactions: ContractInteractions) {.async.} =
-  await interactions.clock.start()
-  await interactions.sales.start()
-  await interactions.proving.start()
-  await interactions.purchasing.start()
-
-proc stop*(interactions: ContractInteractions) {.async.} =
-  await interactions.purchasing.stop()
-  await interactions.sales.stop()
-  await interactions.proving.stop()
-  await interactions.clock.stop()
@@ -0,0 +1,27 @@
+import pkg/ethers
+
+import ../../purchasing
+import ../../logutils
+import ../market
+import ../clock
+import ./interactions
+
+export purchasing
+export logutils
+
+type
+  ClientInteractions* = ref object of ContractInteractions
+    purchasing*: Purchasing
+
+proc new*(_: type ClientInteractions,
+          clock: OnChainClock,
+          purchasing: Purchasing): ClientInteractions =
+  ClientInteractions(clock: clock, purchasing: purchasing)
+
+proc start*(self: ClientInteractions) {.async.} =
+  await procCall ContractInteractions(self).start()
+  await self.purchasing.start()
+
+proc stop*(self: ClientInteractions) {.async.} =
+  await self.purchasing.stop()
+  await procCall ContractInteractions(self).stop()
@@ -0,0 +1,29 @@
+import pkg/chronos
+
+import ../../logutils
+import ../../sales
+import ./interactions
+
+export sales
+export logutils
+
+type
+  HostInteractions* = ref object of ContractInteractions
+    sales*: Sales
+
+proc new*(
+  _: type HostInteractions,
+  clock: Clock,
+  sales: Sales
+): HostInteractions =
+  ## Create a new HostInteractions instance
+  ##
+  HostInteractions(clock: clock, sales: sales)
+
+method start*(self: HostInteractions) {.async.} =
+  await procCall ContractInteractions(self).start()
+  await self.sales.start()
+
+method stop*(self: HostInteractions) {.async.} =
+  await self.sales.stop()
+  await procCall ContractInteractions(self).stop()
@@ -0,0 +1,16 @@
+import pkg/ethers
+import ../clock
+import ../marketplace
+import ../market
+
+export clock
+
+type
+  ContractInteractions* = ref object of RootObj
+    clock*: Clock
+
+method start*(self: ContractInteractions) {.async, base.} =
+  discard
+
+method stop*(self: ContractInteractions) {.async, base.} =
+  discard
@@ -0,0 +1,21 @@
+import ./interactions
+import ../../validation
+
+export validation
+
+type
+  ValidatorInteractions* = ref object of ContractInteractions
+    validation: Validation
+
+proc new*(_: type ValidatorInteractions,
+          clock: OnChainClock,
+          validation: Validation): ValidatorInteractions =
+  ValidatorInteractions(clock: clock, validation: validation)
+
+proc start*(self: ValidatorInteractions) {.async.} =
+  await procCall ContractInteractions(self).start()
+  await self.validation.start()
+
+proc stop*(self: ValidatorInteractions) {.async.} =
+  await self.validation.stop()
+  await procCall ContractInteractions(self).stop()
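Editor's note: the three subtypes split the old monolithic `ContractInteractions` by role, so a node only starts the interactions it actually plays. A hedged wiring sketch; construction of `Sales`, `Purchasing` and `Validation` is elided and assumed to happen elsewhere in the codebase:

```nim
proc wireInteractions(clock: OnChainClock,
                      sales: Sales,
                      purchasing: Purchasing,
                      validation: Validation) {.async.} =
  let host = HostInteractions.new(clock, sales)
  let client = ClientInteractions.new(clock, purchasing)
  let validator = ValidatorInteractions.new(clock, validation)
  await host.start()       # storage provider role
  await client.start()     # storage buyer role
  await validator.start()  # proof watchdog role
```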
@@ -1,99 +1,282 @@
+import std/sequtils
 import std/strutils
+import std/sugar
 import pkg/ethers
-import pkg/ethers/testing
 import pkg/upraises
 import pkg/questionable
+import ../utils/exceptions
+import ../logutils
 import ../market
-import ./storage
-import ./proofs
+import ./marketplace

 export market

+logScope:
+  topics = "marketplace onchain market"
+
 type
   OnChainMarket* = ref object of Market
-    contract: Storage
+    contract: Marketplace
     signer: Signer
   MarketSubscription = market.Subscription
   EventSubscription = ethers.Subscription
   OnChainMarketSubscription = ref object of MarketSubscription
     eventSubscription: EventSubscription

-func new*(_: type OnChainMarket, contract: Storage): OnChainMarket =
+func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket =
   without signer =? contract.signer:
-    raiseAssert("Storage contract should have a signer")
+    raiseAssert("Marketplace contract should have a signer")
   OnChainMarket(
     contract: contract,
     signer: signer,
   )

+proc raiseMarketError(message: string) {.raises: [MarketError].} =
+  raise newException(MarketError, message)
+
+template convertEthersError(body) =
+  try:
+    body
+  except EthersError as error:
+    raiseMarketError(error.msgDetail)
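Editor's note: the net effect of `convertEthersError` is that callers of `OnChainMarket` deal with a single `MarketError` instead of the various `EthersError` subtypes. A hedged sketch of the caller side:

```nim
proc printSigner(market: OnChainMarket) {.async.} =
  try:
    echo await market.getSigner()
  except MarketError as e:
    # provider/transport details arrive via e.msg (from msgDetail)
    echo "market call failed: ", e.msg
```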
+proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
+  debug "Approving tokens", amount
+  convertEthersError:
+    let tokenAddress = await market.contract.token()
+    let token = Erc20Token.new(tokenAddress, market.signer)
+    discard await token.increaseAllowance(market.contract.address(), amount).confirm(0)
+
+method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
+  let config = await market.contract.config()
+  return some config.proofs.zkeyHash
+
 method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
-  return await market.signer.getAddress()
+  convertEthersError:
+    return await market.signer.getAddress()
+
+method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
+  convertEthersError:
+    let config = await market.contract.config()
+    let period = config.proofs.period
+    return Periodicity(seconds: period)
+
+method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} =
+  convertEthersError:
+    let config = await market.contract.config()
+    return config.proofs.timeout
+
+method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
+  convertEthersError:
+    let config = await market.contract.config()
+    return config.proofs.downtime
+
+method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
+  convertEthersError:
+    let overrides = CallOverrides(blockTag: some BlockTag.pending)
+    return await market.contract.getPointer(slotId, overrides)

 method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
-  return await market.contract.myRequests
+  convertEthersError:
+    return await market.contract.myRequests
+
+method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
+  convertEthersError:
+    let slots = await market.contract.mySlots()
+    debug "Fetched my slots", numSlots=len(slots)
+
+    return slots

 method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} =
-  await market.contract.requestStorage(request)
+  convertEthersError:
+    debug "Requesting storage"
+    await market.approveFunds(request.price())
+    discard await market.contract.requestStorage(request).confirm(0)

 method getRequest(market: OnChainMarket,
                   id: RequestId): Future[?StorageRequest] {.async.} =
-  try:
-    return some await market.contract.getRequest(id)
-  except ProviderError as e:
-    if e.revertReason.contains("Unknown request"):
-      return none StorageRequest
-    raise e
+  convertEthersError:
+    try:
+      return some await market.contract.getRequest(id)
+    except ProviderError as e:
+      if e.msgDetail.contains("Unknown request"):
+        return none StorageRequest
+      raise e

-method getState*(market: OnChainMarket,
-                 requestId: RequestId): Future[?RequestState] {.async.} =
-  try:
-    return some await market.contract.state(requestId)
-  except ProviderError as e:
-    if e.revertReason.contains("Unknown request"):
-      return none RequestState
-    raise e
+method requestState*(market: OnChainMarket,
+                     requestId: RequestId): Future[?RequestState] {.async.} =
+  convertEthersError:
+    try:
+      let overrides = CallOverrides(blockTag: some BlockTag.pending)
+      return some await market.contract.requestState(requestId, overrides)
+    except ProviderError as e:
+      if e.msgDetail.contains("Unknown request"):
+        return none RequestState
+      raise e
+
+method slotState*(market: OnChainMarket,
+                  slotId: SlotId): Future[SlotState] {.async.} =
+  convertEthersError:
+    let overrides = CallOverrides(blockTag: some BlockTag.pending)
+    return await market.contract.slotState(slotId, overrides)

 method getRequestEnd*(market: OnChainMarket,
                       id: RequestId): Future[SecondsSince1970] {.async.} =
-  return await market.contract.requestEnd(id)
+  convertEthersError:
+    return await market.contract.requestEnd(id)
+
+method requestExpiresAt*(market: OnChainMarket,
+                         id: RequestId): Future[SecondsSince1970] {.async.} =
+  convertEthersError:
+    return await market.contract.requestExpiry(id)

 method getHost(market: OnChainMarket,
                requestId: RequestId,
                slotIndex: UInt256): Future[?Address] {.async.} =
-  let slotId = slotId(requestId, slotIndex)
-  let address = await market.contract.getHost(slotId)
-  if address != Address.default:
-    return some address
-  else:
-    return none Address
+  convertEthersError:
+    let slotId = slotId(requestId, slotIndex)
+    let address = await market.contract.getHost(slotId)
+    if address != Address.default:
+      return some address
+    else:
+      return none Address
+
+method getActiveSlot*(market: OnChainMarket,
+                      slotId: SlotId): Future[?Slot] {.async.} =
+  convertEthersError:
+    try:
+      return some await market.contract.getActiveSlot(slotId)
+    except ProviderError as e:
+      if e.msgDetail.contains("Slot is free"):
+        return none Slot
+      raise e
 method fillSlot(market: OnChainMarket,
                 requestId: RequestId,
                 slotIndex: UInt256,
-                proof: seq[byte]) {.async.} =
-  await market.contract.fillSlot(requestId, slotIndex, proof)
+                proof: Groth16Proof,
+                collateral: UInt256) {.async.} =
+  convertEthersError:
+    await market.approveFunds(collateral)
+    discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(0)
+
+method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
+  convertEthersError:
+    discard await market.contract.freeSlot(slotId).confirm(0)

 method withdrawFunds(market: OnChainMarket,
                      requestId: RequestId) {.async.} =
-  await market.contract.withdrawFunds(requestId)
+  convertEthersError:
+    discard await market.contract.withdrawFunds(requestId).confirm(0)

-method subscribeRequests(market: OnChainMarket,
+method isProofRequired*(market: OnChainMarket,
+                        id: SlotId): Future[bool] {.async.} =
+  convertEthersError:
+    try:
+      let overrides = CallOverrides(blockTag: some BlockTag.pending)
+      return await market.contract.isProofRequired(id, overrides)
+    except ProviderError as e:
+      if e.msgDetail.contains("Slot is free"):
+        return false
+      raise e
+
+method willProofBeRequired*(market: OnChainMarket,
+                            id: SlotId): Future[bool] {.async.} =
+  convertEthersError:
+    try:
+      let overrides = CallOverrides(blockTag: some BlockTag.pending)
+      return await market.contract.willProofBeRequired(id, overrides)
+    except ProviderError as e:
+      if e.msgDetail.contains("Slot is free"):
+        return false
+      raise e
+
+method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} =
+  convertEthersError:
+    let overrides = CallOverrides(blockTag: some BlockTag.pending)
+    return await market.contract.getChallenge(id, overrides)
+
+method submitProof*(market: OnChainMarket,
+                    id: SlotId,
+                    proof: Groth16Proof) {.async.} =
+  convertEthersError:
+    discard await market.contract.submitProof(id, proof).confirm(0)
+
+method markProofAsMissing*(market: OnChainMarket,
+                           id: SlotId,
+                           period: Period) {.async.} =
+  convertEthersError:
+    discard await market.contract.markProofAsMissing(id, period).confirm(0)
+
+method canProofBeMarkedAsMissing*(
+    market: OnChainMarket,
+    id: SlotId,
+    period: Period
+): Future[bool] {.async.} =
+  let provider = market.contract.provider
+  let contractWithoutSigner = market.contract.connect(provider)
+  let overrides = CallOverrides(blockTag: some BlockTag.pending)
+  try:
+    discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
+    return true
+  except EthersError as e:
+    trace "Proof cannot be marked as missing", msg = e.msg
+    return false
+method subscribeRequests*(market: OnChainMarket,
                          callback: OnRequest):
                         Future[MarketSubscription] {.async.} =
   proc onEvent(event: StorageRequested) {.upraises:[].} =
-    callback(event.requestId, event.ask)
-  let subscription = await market.contract.subscribe(StorageRequested, onEvent)
-  return OnChainMarketSubscription(eventSubscription: subscription)
+    callback(event.requestId,
+             event.ask,
+             event.expiry)
+
+  convertEthersError:
+    let subscription = await market.contract.subscribe(StorageRequested, onEvent)
+    return OnChainMarketSubscription(eventSubscription: subscription)
+
+method subscribeSlotFilled*(market: OnChainMarket,
+                            callback: OnSlotFilled):
+                           Future[MarketSubscription] {.async.} =
+  proc onEvent(event: SlotFilled) {.upraises:[].} =
+    callback(event.requestId, event.slotIndex)
+
+  convertEthersError:
+    let subscription = await market.contract.subscribe(SlotFilled, onEvent)
+    return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeSlotFilled*(market: OnChainMarket,
                             requestId: RequestId,
                             slotIndex: UInt256,
                             callback: OnSlotFilled):
                            Future[MarketSubscription] {.async.} =
-  proc onEvent(event: SlotFilled) {.upraises:[].} =
-    if event.requestId == requestId and event.slotIndex == slotIndex:
-      callback(event.requestId, event.slotIndex)
-  let subscription = await market.contract.subscribe(SlotFilled, onEvent)
-  return OnChainMarketSubscription(eventSubscription: subscription)
+  proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) =
+    if eventRequestId == requestId and eventSlotIndex == slotIndex:
+      callback(requestId, slotIndex)
+
+  convertEthersError:
+    return await market.subscribeSlotFilled(onSlotFilled)
+
+method subscribeSlotFreed*(market: OnChainMarket,
+                           callback: OnSlotFreed):
+                          Future[MarketSubscription] {.async.} =
+  proc onEvent(event: SlotFreed) {.upraises:[].} =
+    callback(event.requestId, event.slotIndex)
+
+  convertEthersError:
+    let subscription = await market.contract.subscribe(SlotFreed, onEvent)
+    return OnChainMarketSubscription(eventSubscription: subscription)
+
+method subscribeFulfillment(market: OnChainMarket,
+                            callback: OnFulfillment):
+                           Future[MarketSubscription] {.async.} =
+  proc onEvent(event: RequestFulfilled) {.upraises:[].} =
+    callback(event.requestId)
+
+  convertEthersError:
+    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
+    return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeFulfillment(market: OnChainMarket,
                             requestId: RequestId,
@@ -102,8 +285,20 @@ method subscribeFulfillment(market: OnChainMarket,
   proc onEvent(event: RequestFulfilled) {.upraises:[].} =
     if event.requestId == requestId:
       callback(event.requestId)
-  let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
-  return OnChainMarketSubscription(eventSubscription: subscription)
+
+  convertEthersError:
+    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
+    return OnChainMarketSubscription(eventSubscription: subscription)
+
+method subscribeRequestCancelled*(market: OnChainMarket,
+                                  callback: OnRequestCancelled):
+                                 Future[MarketSubscription] {.async.} =
+  proc onEvent(event: RequestCancelled) {.upraises:[].} =
+    callback(event.requestId)
+
+  convertEthersError:
+    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
+    return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeRequestCancelled*(market: OnChainMarket,
                                   requestId: RequestId,
@ -112,18 +307,63 @@ method subscribeRequestCancelled*(market: OnChainMarket,
|
||||||
proc onEvent(event: RequestCancelled) {.upraises:[].} =
|
proc onEvent(event: RequestCancelled) {.upraises:[].} =
|
||||||
if event.requestId == requestId:
|
if event.requestId == requestId:
|
||||||
callback(event.requestId)
|
callback(event.requestId)
|
||||||
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
|
||||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
convertEthersError:
|
||||||
|
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
||||||
|
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||||
|
|
||||||
|
method subscribeRequestFailed*(market: OnChainMarket,
|
||||||
|
callback: OnRequestFailed):
|
||||||
|
Future[MarketSubscription] {.async.} =
|
||||||
|
proc onEvent(event: RequestFailed) {.upraises:[]} =
|
||||||
|
callback(event.requestId)
|
||||||
|
|
||||||
|
convertEthersError:
|
||||||
|
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||||
|
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||||
|
|
||||||
method subscribeRequestFailed*(market: OnChainMarket,
|
method subscribeRequestFailed*(market: OnChainMarket,
|
||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
callback: OnRequestFailed):
|
callback: OnRequestFailed):
|
||||||
Future[MarketSubscription] {.async.} =
|
Future[MarketSubscription] {.async.} =
|
||||||
proc onEvent(event: RequestFailed) {.upraises:[].} =
|
proc onEvent(event: RequestFailed) {.upraises:[]} =
|
||||||
if event.requestId == requestId:
|
if event.requestId == requestId:
|
||||||
callback(event.requestId)
|
callback(event.requestId)
|
||||||
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
|
||||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
convertEthersError:
|
||||||
|
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||||
|
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||||
|
|
||||||
|
method subscribeProofSubmission*(market: OnChainMarket,
|
||||||
|
callback: OnProofSubmitted):
|
||||||
|
Future[MarketSubscription] {.async.} =
|
||||||
|
proc onEvent(event: ProofSubmitted) {.upraises: [].} =
|
||||||
|
callback(event.id)
|
||||||
|
|
||||||
|
convertEthersError:
|
||||||
|
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
|
||||||
|
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||||
|
|
||||||
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
|
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
|
||||||
await subscription.eventSubscription.unsubscribe()
|
await subscription.eventSubscription.unsubscribe()
|
||||||
|
|
||||||
|
method queryPastStorageRequests*(market: OnChainMarket,
|
||||||
|
blocksAgo: int):
|
||||||
|
Future[seq[PastStorageRequest]] {.async.} =
|
||||||
|
convertEthersError:
|
||||||
|
let contract = market.contract
|
||||||
|
let provider = contract.provider
|
||||||
|
|
||||||
|
let head = await provider.getBlockNumber()
|
||||||
|
let fromBlock = BlockTag.init(head - blocksAgo.abs.u256)
|
||||||
|
|
||||||
|
let events = await contract.queryFilter(StorageRequested,
|
||||||
|
fromBlock,
|
||||||
|
BlockTag.latest)
|
||||||
|
return events.map(event =>
|
||||||
|
PastStorageRequest(
|
||||||
|
requestId: event.requestId,
|
||||||
|
ask: event.ask,
|
||||||
|
expiry: event.expiry
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
|
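
The convertEthersError wrapper above only changes how provider failures surface; subscribing itself stays simple. A minimal usage sketch — the `market` instance, the callback pragma, and the echo-based handling are assumptions for illustration, not part of the diff:

proc watchCancellation(market: OnChainMarket, id: RequestId) {.async.} =
  proc onCancelled(requestId: RequestId) {.upraises: [].} =
    echo "request cancelled: ", requestId

  let subscription = await market.subscribeRequestCancelled(id, onCancelled)
  # ... react to events for as long as needed ...
  await subscription.unsubscribe()
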
@@ -0,0 +1,70 @@
+import pkg/ethers
+import pkg/ethers/erc20
+import pkg/json_rpc/rpcclient
+import pkg/stint
+import pkg/chronos
+import ../clock
+import ./requests
+import ./proofs
+import ./config
+
+export stint
+export ethers except `%`, `%*`, toJson
+export erc20 except `%`, `%*`, toJson
+export config
+export requests
+
+type
+  Marketplace* = ref object of Contract
+  StorageRequested* = object of Event
+    requestId*: RequestId
+    ask*: StorageAsk
+    expiry*: UInt256
+  SlotFilled* = object of Event
+    requestId* {.indexed.}: RequestId
+    slotIndex*: UInt256
+  SlotFreed* = object of Event
+    requestId* {.indexed.}: RequestId
+    slotIndex*: UInt256
+  RequestFulfilled* = object of Event
+    requestId* {.indexed.}: RequestId
+  RequestCancelled* = object of Event
+    requestId* {.indexed.}: RequestId
+  RequestFailed* = object of Event
+    requestId* {.indexed.}: RequestId
+  ProofSubmitted* = object of Event
+    id*: SlotId
+
+
+proc config*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
+proc token*(marketplace: Marketplace): Address {.contract, view.}
+proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
+proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
+proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}
+
+proc requestStorage*(marketplace: Marketplace, request: StorageRequest): ?TransactionResponse {.contract.}
+proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): ?TransactionResponse {.contract.}
+proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): ?TransactionResponse {.contract.}
+proc freeSlot*(marketplace: Marketplace, id: SlotId): ?TransactionResponse {.contract.}
+proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.}
+proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
+proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.}
+
+proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
+proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
+proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.}
+proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
+proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
+proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
+
+proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}
+
+proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
+proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
+proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
+proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
+proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.}
+proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
+
+proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): ?TransactionResponse {.contract.}
+proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): ?TransactionResponse {.contract.}
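
For orientation, a sketch of how the generated {.contract.} bindings above might be driven from nim-ethers. The URL and address are placeholders, and the `JsonRpcProvider.new`/`Marketplace.new` calls are assumed from the ethers package rather than shown in this diff:

import pkg/ethers
import pkg/questionable

proc readMarketplace() {.async.} =
  let provider = JsonRpcProvider.new("ws://localhost:8545")  # placeholder URL
  without address =? Address.init("0x0000000000000000000000000000000000000000"):
    return                                                   # placeholder address
  let marketplace = Marketplace.new(address, provider)
  let config = await marketplace.config()     # {.contract, view.} => read-only eth_call
  echo "proof timeout: ", await marketplace.proofTimeout()
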
@@ -1,68 +1,43 @@
-import std/strutils
-import pkg/ethers
-import pkg/ethers/testing
-import ../storageproofs/timing/proofs
-import ./storage
-
-export proofs
+import pkg/stint
+import pkg/contractabi
+import pkg/ethers/fields

 type
-  OnChainProofs* = ref object of Proofs
-    storage: Storage
-    pollInterval*: Duration
-  ProofsSubscription = proofs.Subscription
-  EventSubscription = ethers.Subscription
-  OnChainProofsSubscription = ref object of ProofsSubscription
-    eventSubscription: EventSubscription
-
-const DefaultPollInterval = 3.seconds
-
-proc new*(_: type OnChainProofs, storage: Storage): OnChainProofs =
-  OnChainProofs(storage: storage, pollInterval: DefaultPollInterval)
-
-method periodicity*(proofs: OnChainProofs): Future[Periodicity] {.async.} =
-  let period = await proofs.storage.proofPeriod()
-  return Periodicity(seconds: period)
-
-method isProofRequired*(proofs: OnChainProofs,
-                        id: SlotId): Future[bool] {.async.} =
-  try:
-    return await proofs.storage.isProofRequired(id)
-  except ProviderError as e:
-    if e.revertReason.contains("Slot empty"):
-      return false
-    raise e
-
-method willProofBeRequired*(proofs: OnChainProofs,
-                            id: SlotId): Future[bool] {.async.} =
-  try:
-    return await proofs.storage.willProofBeRequired(id)
-  except ProviderError as e:
-    if e.revertReason.contains("Slot empty"):
-      return false
-    raise e
-
-method getProofEnd*(proofs: OnChainProofs,
-                    id: SlotId): Future[UInt256] {.async.} =
-  try:
-    return await proofs.storage.proofEnd(id)
-  except ProviderError as e:
-    if e.revertReason.contains("Slot empty"):
-      return 0.u256
-    raise e
-
-method submitProof*(proofs: OnChainProofs,
-                    id: SlotId,
-                    proof: seq[byte]) {.async.} =
-  await proofs.storage.submitProof(id, proof)
-
-method subscribeProofSubmission*(proofs: OnChainProofs,
-                                 callback: OnProofSubmitted):
-                                Future[ProofsSubscription] {.async.} =
-  proc onEvent(event: ProofSubmitted) {.upraises: [].} =
-    callback(event.id, event.proof)
-  let subscription = await proofs.storage.subscribe(ProofSubmitted, onEvent)
-  return OnChainProofsSubscription(eventSubscription: subscription)
-
-method unsubscribe*(subscription: OnChainProofsSubscription) {.async, upraises:[].} =
-  await subscription.eventSubscription.unsubscribe()
+  Groth16Proof* = object
+    a*: G1Point
+    b*: G2Point
+    c*: G1Point
+  G1Point* = object
+    x*: UInt256
+    y*: UInt256
+  # A field element F_{p^2} encoded as `real + i * imag`
+  Fp2Element* = object
+    real*: UInt256
+    imag*: UInt256
+  G2Point* = object
+    x*: Fp2Element
+    y*: Fp2Element
+
+func solidityType*(_: type G1Point): string =
+  solidityType(G1Point.fieldTypes)
+
+func solidityType*(_: type Fp2Element): string =
+  solidityType(Fp2Element.fieldTypes)
+
+func solidityType*(_: type G2Point): string =
+  solidityType(G2Point.fieldTypes)
+
+func solidityType*(_: type Groth16Proof): string =
+  solidityType(Groth16Proof.fieldTypes)
+
+func encode*(encoder: var AbiEncoder, point: G1Point) =
+  encoder.write(point.fieldValues)
+
+func encode*(encoder: var AbiEncoder, element: Fp2Element) =
+  encoder.write(element.fieldValues)
+
+func encode*(encoder: var AbiEncoder, point: G2Point) =
+  encoder.write(point.fieldValues)
+
+func encode*(encoder: var AbiEncoder, proof: Groth16Proof) =
+  encoder.write(proof.fieldValues)
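
The solidityType/encode pairs above let contractabi treat these objects as nested Solidity tuples, so a proof can be ABI-encoded directly for calldata. A small sketch with dummy field values:

import pkg/contractabi
import pkg/stint

let proof = Groth16Proof(
  a: G1Point(x: 1.u256, y: 2.u256),
  b: G2Point(x: Fp2Element(real: 3.u256, imag: 4.u256),
             y: Fp2Element(real: 5.u256, imag: 6.u256)),
  c: G1Point(x: 7.u256, y: 8.u256))
# encodes as ((uint256,uint256),((uint256,uint256),(uint256,uint256)),(uint256,uint256))
let calldataArg = AbiEncoder.encode(proof)  # seq[byte]
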
@@ -1,35 +1,38 @@
 import std/hashes
+import std/sequtils
+import std/typetraits
 import pkg/contractabi
 import pkg/nimcrypto
 import pkg/ethers/fields
 import pkg/questionable/results
+import pkg/stew/byteutils
+import pkg/upraises
+import ../logutils
+import ../utils/json

 export contractabi

 type
   StorageRequest* = object
-    client*: Address
-    ask*: StorageAsk
-    content*: StorageContent
-    expiry*: UInt256
+    client* {.serialize.}: Address
+    ask* {.serialize.}: StorageAsk
+    content* {.serialize.}: StorageContent
+    expiry* {.serialize.}: UInt256
     nonce*: Nonce
   StorageAsk* = object
-    slots*: uint64
-    slotSize*: UInt256
-    duration*: UInt256
-    proofProbability*: UInt256
-    reward*: UInt256
-    maxSlotLoss*: uint64
+    slots* {.serialize.}: uint64
+    slotSize* {.serialize.}: UInt256
+    duration* {.serialize.}: UInt256
+    proofProbability* {.serialize.}: UInt256
+    reward* {.serialize.}: UInt256
+    collateral* {.serialize.}: UInt256
+    maxSlotLoss* {.serialize.}: uint64
   StorageContent* = object
-    cid*: string
-    erasure*: StorageErasure
-    por*: StoragePoR
-  StorageErasure* = object
-    totalChunks*: uint64
-  StoragePoR* = object
-    u*: seq[byte]
-    publicKey*: seq[byte]
-    name*: seq[byte]
+    cid* {.serialize.}: string
+    merkleRoot*: array[32, byte]
+  Slot* = object
+    request* {.serialize.}: StorageRequest
+    slotIndex* {.serialize.}: UInt256
   SlotId* = distinct array[32, byte]
   RequestId* = distinct array[32, byte]
   Nonce* = distinct array[32, byte]

@@ -39,11 +42,20 @@ type
     Cancelled
     Finished
     Failed
+  SlotState* {.pure.} = enum
+    Free
+    Filled
+    Finished
+    Failed
+    Paid
+    Cancelled

 proc `==`*(x, y: Nonce): bool {.borrow.}
 proc `==`*(x, y: RequestId): bool {.borrow.}
 proc `==`*(x, y: SlotId): bool {.borrow.}
 proc hash*(x: SlotId): Hash {.borrow.}
+proc hash*(x: Nonce): Hash {.borrow.}
+proc hash*(x: Address): Hash {.borrow.}

 func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
   array[32, byte](id)

@@ -51,6 +63,30 @@ func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
 proc `$`*(id: RequestId | SlotId | Nonce): string =
   id.toArray.toHex

+proc fromHex*(T: type RequestId, hex: string): T =
+  T array[32, byte].fromHex(hex)
+
+proc fromHex*(T: type SlotId, hex: string): T =
+  T array[32, byte].fromHex(hex)
+
+proc fromHex*(T: type Nonce, hex: string): T =
+  T array[32, byte].fromHex(hex)
+
+proc fromHex*[T: distinct](_: type T, hex: string): T =
+  type baseType = T.distinctBase
+  T baseType.fromHex(hex)
+
+proc toHex*[T: distinct](id: T): string =
+  type baseType = T.distinctBase
+  baseType(id).toHex
+
+logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog
+logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog
+logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog
+logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog
+logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog
+logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog
+
 func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
   StorageRequest(
     client: tupl[0],

@@ -60,6 +96,12 @@ func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
     nonce: tupl[4]
   )

+func fromTuple(_: type Slot, tupl: tuple): Slot =
+  Slot(
+    request: tupl[0],
+    slotIndex: tupl[1]
+  )
+
 func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
   StorageAsk(
     slots: tupl[0],

@@ -67,34 +109,16 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
     duration: tupl[2],
     proofProbability: tupl[3],
     reward: tupl[4],
-    maxSlotLoss: tupl[5]
+    collateral: tupl[5],
+    maxSlotLoss: tupl[6]
   )

 func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
   StorageContent(
     cid: tupl[0],
-    erasure: tupl[1],
-    por: tupl[2]
+    merkleRoot: tupl[1]
   )

-func fromTuple(_: type StorageErasure, tupl: tuple): StorageErasure =
-  StorageErasure(
-    totalChunks: tupl[0]
-  )
-
-func fromTuple(_: type StoragePoR, tupl: tuple): StoragePoR =
-  StoragePoR(
-    u: tupl[0],
-    publicKey: tupl[1],
-    name: tupl[2]
-  )
-
-func solidityType*(_: type StoragePoR): string =
-  solidityType(StoragePoR.fieldTypes)
-
-func solidityType*(_: type StorageErasure): string =
-  solidityType(StorageErasure.fieldTypes)
-
 func solidityType*(_: type StorageContent): string =
   solidityType(StorageContent.fieldTypes)

@@ -104,15 +128,6 @@ func solidityType*(_: type StorageAsk): string =
 func solidityType*(_: type StorageRequest): string =
   solidityType(StorageRequest.fieldTypes)

-func solidityType*[T: RequestId | SlotId | Nonce](_: type T): string =
-  solidityType(array[32, byte])
-
-func encode*(encoder: var AbiEncoder, por: StoragePoR) =
-  encoder.write(por.fieldValues)
-
-func encode*(encoder: var AbiEncoder, erasure: StorageErasure) =
-  encoder.write(erasure.fieldValues)
-
 func encode*(encoder: var AbiEncoder, content: StorageContent) =
   encoder.write(content.fieldValues)

@@ -125,18 +140,8 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
 func encode*(encoder: var AbiEncoder, request: StorageRequest) =
   encoder.write(request.fieldValues)

-func decode*[T: RequestId | SlotId | Nonce](decoder: var AbiDecoder,
-                                            _: type T): ?!T =
-  let nonce = ?decoder.read(type array[32, byte])
-  success T(nonce)
-
-func decode*(decoder: var AbiDecoder, T: type StoragePoR): ?!T =
-  let tupl = ?decoder.read(StoragePoR.fieldTypes)
-  success StoragePoR.fromTuple(tupl)
-
-func decode*(decoder: var AbiDecoder, T: type StorageErasure): ?!T =
-  let tupl = ?decoder.read(StorageErasure.fieldTypes)
-  success StorageErasure.fromTuple(tupl)
-
+func encode*(encoder: var AbiEncoder, request: Slot) =
+  encoder.write(request.fieldValues)
+
 func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
   let tupl = ?decoder.read(StorageContent.fieldTypes)

@@ -150,6 +155,10 @@ func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T =
   let tupl = ?decoder.read(StorageRequest.fieldTypes)
   success StorageRequest.fromTuple(tupl)

+func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
+  let tupl = ?decoder.read(Slot.fieldTypes)
+  success Slot.fromTuple(tupl)
+
 func id*(request: StorageRequest): RequestId =
   let encoding = AbiEncoder.encode((request, ))
   RequestId(keccak256.digest(encoding).data)

@@ -161,6 +170,9 @@ func slotId*(requestId: RequestId, slot: UInt256): SlotId =
 func slotId*(request: StorageRequest, slot: UInt256): SlotId =
   slotId(request.id, slot)

+func id*(slot: Slot): SlotId =
+  slotId(slot.request, slot.slotIndex)
+
 func pricePerSlot*(ask: StorageAsk): UInt256 =
   ask.duration * ask.reward

@@ -1,62 +0,0 @@
-import pkg/ethers
-import pkg/json_rpc/rpcclient
-import pkg/stint
-import pkg/chronos
-import ../clock
-import ./requests
-
-export stint
-export ethers
-
-type
-  Storage* = ref object of Contract
-  StorageRequested* = object of Event
-    requestId*: RequestId
-    ask*: StorageAsk
-  SlotFilled* = object of Event
-    requestId* {.indexed.}: RequestId
-    slotIndex* {.indexed.}: UInt256
-    slotId*: SlotId
-  RequestFulfilled* = object of Event
-    requestId* {.indexed.}: RequestId
-  RequestCancelled* = object of Event
-    requestId* {.indexed.}: RequestId
-  RequestFailed* = object of Event
-    requestId* {.indexed.}: RequestId
-  ProofSubmitted* = object of Event
-    id*: SlotId
-    proof*: seq[byte]
-
-
-proc collateralAmount*(storage: Storage): UInt256 {.contract, view.}
-proc slashMisses*(storage: Storage): UInt256 {.contract, view.}
-proc slashPercentage*(storage: Storage): UInt256 {.contract, view.}
-proc minCollateralThreshold*(storage: Storage): UInt256 {.contract, view.}
-
-proc deposit*(storage: Storage, amount: UInt256) {.contract.}
-proc withdraw*(storage: Storage) {.contract.}
-proc balanceOf*(storage: Storage, account: Address): UInt256 {.contract, view.}
-
-proc requestStorage*(storage: Storage, request: StorageRequest) {.contract.}
-proc fillSlot*(storage: Storage, requestId: RequestId, slotIndex: UInt256, proof: seq[byte]) {.contract.}
-proc withdrawFunds*(storage: Storage, requestId: RequestId) {.contract.}
-proc payoutSlot*(storage: Storage, requestId: RequestId, slotIndex: UInt256) {.contract.}
-proc getRequest*(storage: Storage, id: RequestId): StorageRequest {.contract, view.}
-proc getHost*(storage: Storage, id: SlotId): Address {.contract, view.}
-
-proc myRequests*(storage: Storage): seq[RequestId] {.contract, view.}
-proc state*(storage: Storage, requestId: RequestId): RequestState {.contract, view.}
-proc requestEnd*(storage: Storage, requestId: RequestId): SecondsSince1970 {.contract, view.}
-
-proc proofPeriod*(storage: Storage): UInt256 {.contract, view.}
-proc proofTimeout*(storage: Storage): UInt256 {.contract, view.}
-
-proc proofEnd*(storage: Storage, id: SlotId): UInt256 {.contract, view.}
-proc missingProofs*(storage: Storage, id: SlotId): UInt256 {.contract, view.}
-proc isProofRequired*(storage: Storage, id: SlotId): bool {.contract, view.}
-proc willProofBeRequired*(storage: Storage, id: SlotId): bool {.contract, view.}
-proc getChallenge*(storage: Storage, id: SlotId): array[32, byte] {.contract, view.}
-proc getPointer*(storage: Storage, id: SlotId): uint8 {.contract, view.}
-
-proc submitProof*(storage: Storage, id: SlotId, proof: seq[byte]) {.contract.}
-proc markProofAsMissing*(storage: Storage, id: SlotId, period: UInt256) {.contract.}

@@ -1,10 +0,0 @@
-import pkg/chronos
-import pkg/stint
-import pkg/ethers
-
-type
-  TestToken* = ref object of Contract
-
-proc mint*(token: TestToken, holder: Address, amount: UInt256) {.contract.}
-proc approve*(token: TestToken, spender: Address, amount: UInt256) {.contract.}
-proc balanceOf*(token: TestToken, account: Address): UInt256 {.contract, view.}
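
A round-trip sketch for the distinct-id helpers introduced in the requests module above (the hex value is a dummy; `slotId` is the keccak-based derivation from the same module):

import std/strutils
import pkg/stint

let hex = repeat("ab", 32)              # 64 hex chars = one 32-byte id
let requestId = RequestId.fromHex(hex)
doAssert requestId.toHex == hex
let firstSlot = slotId(requestId, 0.u256)  # deterministic per (request, index)
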
@@ -8,21 +8,19 @@
 ## those terms.

 import std/algorithm
+import std/sequtils

 import pkg/chronos
-import pkg/chronicles
-import pkg/libp2p
-import pkg/libp2p/routing_record
-import pkg/libp2p/signed_envelope
+import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
 import pkg/questionable
 import pkg/questionable/results
 import pkg/stew/shims/net
 import pkg/contractabi/address as ca
-import pkg/libp2pdht/discv5/protocol as discv5
+import pkg/codexdht/discv5/[routing_table, protocol as discv5]

 import ./rng
 import ./errors
-import ./formats
+import ./logutils

 export discv5

@@ -35,10 +33,10 @@ logScope:
 type
   Discovery* = ref object of RootObj
-    protocol: discv5.Protocol          # dht protocol
+    protocol*: discv5.Protocol         # dht protocol
     key: PrivateKey                    # private key
     peerId: PeerId                     # the peer id of the local node
-    announceAddrs: seq[MultiAddress]   # addresses announced as part of the provider records
+    announceAddrs*: seq[MultiAddress]  # addresses announced as part of the provider records
     providerRecord*: ?SignedPeerRecord # record to advertise node connection information; this carries any
                                        # address that the node can be connected on
     dhtRecord*: ?SignedPeerRecord      # record to advertise DHT connection information

@@ -57,7 +55,10 @@ proc toNodeId*(host: ca.Address): NodeId =

 proc findPeer*(
     d: Discovery,
-    peerId: PeerID): Future[?PeerRecord] {.async.} =
+    peerId: PeerId): Future[?PeerRecord] {.async.} =
+  trace "protocol.resolve..."
+  ## Find peer using the given Discovery object
+  ##
   let
     node = await d.protocol.resolve(toNodeId(peerId))

@@ -72,27 +73,22 @@ method find*(
   cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find block providers
   ##
-
-  trace "Finding providers for block", cid
   without providers =?
     (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
-    trace "Error finding providers for block", cid, error = error.msg
+    warn "Error finding providers for block", cid, error = error.msg

-  return providers
+  return providers.filterIt( not (it.data.peerId == d.peerId) )

 method provide*(d: Discovery, cid: Cid) {.async, base.} =
-  ## Provide a bock Cid
+  ## Provide a block Cid
   ##
-
-  trace "Providing block", cid
   let
     nodes = await d.protocol.addProvider(
       cid.toNodeId(), d.providerRecord.get)

   if nodes.len <= 0:
-    trace "Couldn't provide to any nodes!"
+    warn "Couldn't provide to any nodes!"

-  trace "Provided to nodes", nodes = nodes.len

 method find*(
   d: Discovery,

@@ -126,7 +122,9 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =
   if nodes.len > 0:
     trace "Provided to nodes", nodes = nodes.len

-method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base.} =
+method removeProvider*(
+  d: Discovery,
+  peerId: PeerId): Future[void] {.base.} =
   ## Remove provider from providers table
   ##

@@ -160,6 +158,10 @@ proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) =
       IpTransportProtocol.udpProtocol,
       port)])).expect("Should construct signed record").some

+  if not d.protocol.isNil:
+    d.protocol.updateRecord(d.dhtRecord)
+      .expect("Should update SPR")
+
 proc start*(d: Discovery) {.async.} =
   d.protocol.open()
   await d.protocol.start()

@@ -168,22 +170,36 @@ proc stop*(d: Discovery) {.async.} =
   await d.protocol.closeWait()

 proc new*(
   T: type Discovery,
   key: PrivateKey,
   bindIp = ValidIpAddress.init(IPv4_any()),
   bindPort = 0.Port,
   announceAddrs: openArray[MultiAddress],
   bootstrapNodes: openArray[SignedPeerRecord] = [],
-  store: Datastore = SQLiteDatastore.new(Memory)
-    .expect("Should not fail!")): T =
+  store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
+): Discovery =
+  ## Create a new Discovery node instance for the given key and datastore
+  ##

   var
-    self = T(
+    self = Discovery(
       key: key,
       peerId: PeerId.init(key).expect("Should construct PeerId"))

   self.updateAnnounceRecord(announceAddrs)

+  # --------------------------------------------------------------------------
+  # FIXME disable IP limits temporarily so we can run our workshop. Re-enable
+  # and figure out proper solution.
+  let discoveryConfig = DiscoveryConfig(
+    tableIpLimits: TableIpLimits(
+      tableIpLimit: high(uint),
+      bucketIpLimit: high(uint)
+    ),
+    bitsPerHop: DefaultBitsPerHop
+  )
+  # --------------------------------------------------------------------------
+
   self.protocol = newProtocol(
     key,
     bindIp = bindIp.toNormalIp,

@@ -191,6 +207,7 @@ proc new*(
     record = self.providerRecord.get,
     bootstrapRecords = bootstrapNodes,
     rng = Rng.instance(),
-    providers = ProvidersManager.new(store))
+    providers = ProvidersManager.new(store),
+    config = discoveryConfig)

   self
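
A bootstrap sketch for the reworked constructor above. The key, port, and announce addresses are placeholders, and the datastore falls back to the in-memory SQLite default shown in the diff:

proc startDiscovery(key: PrivateKey,
                    announce: seq[MultiAddress]): Future[Discovery] {.async.} =
  let disco = Discovery.new(
    key,
    bindPort = 8090.Port,        # placeholder port
    announceAddrs = announce)
  await disco.start()
  return disco
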
@@ -12,8 +12,14 @@ import ./erasure/backends/leopard

 export erasure

-func leoEncoderProvider*(size, buffers, parity: int): EncoderBackend {.raises: [Defect].} =
+func leoEncoderProvider*(
+  size, buffers, parity: int
+): EncoderBackend {.raises: [Defect].} =
+  ## create new Leo Encoder
   LeoEncoderBackend.new(size, buffers, parity)

-func leoDecoderProvider*(size, buffers, parity: int): DecoderBackend {.raises: [Defect].} =
+func leoDecoderProvider*(
+  size, buffers, parity: int
+): DecoderBackend {.raises: [Defect].} =
+  ## create new Leo Decoder
+  LeoDecoderBackend.new(size, buffers, parity)
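
A sketch of driving the providers above directly; the sizes are arbitrary example values (Leopard wants the block size to be a multiple of 64):

import std/sequtils

const K = 4; const M = 2; const blockSize = 64
var
  data   = newSeqWith(K, newSeq[byte](blockSize))
  parity = newSeqWith(M, newSeq[byte](blockSize))
let encoder = leoEncoderProvider(blockSize, K, M)
doAssert encoder.encode(data, parity).isOk  # fills `parity` in place
encoder.release()
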
@@ -0,0 +1,225 @@
+## Nim-Codex
+## Copyright (c) 2024 Status Research & Development GmbH
+## Licensed under either of
+##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+
+import std/sequtils
+
+import pkg/taskpools
+import pkg/taskpools/flowvars
+import pkg/chronos
+import pkg/chronos/threadsync
+import pkg/questionable/results
+
+import ./backend
+import ../errors
+import ../logutils
+
+logScope:
+  topics = "codex asyncerasure"
+
+const
+  CompletitionTimeout = 1.seconds # Maximum await time for completion after receiving a signal
+  CompletitionRetryDelay = 10.millis
+
+type
+  EncoderBackendPtr = ptr EncoderBackend
+  DecoderBackendPtr = ptr DecoderBackend
+
+  # Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy
+  EncodeTaskArgs = object
+    signal: ThreadSignalPtr
+    backend: EncoderBackendPtr
+    blockSize: int
+    ecM: int
+
+  DecodeTaskArgs = object
+    signal: ThreadSignalPtr
+    backend: DecoderBackendPtr
+    blockSize: int
+    ecK: int
+
+  SharedArrayHolder*[T] = object
+    data: ptr UncheckedArray[T]
+    size: int
+
+  EncodeTaskResult = Result[SharedArrayHolder[byte], cstring]
+  DecodeTaskResult = Result[SharedArrayHolder[byte], cstring]
+
+proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
+  var
+    data = data.unsafeAddr
+    parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize))
+
+  try:
+    let res = args.backend[].encode(data[], parity)
+
+    if res.isOk:
+      let
+        resDataSize = parity.len * args.blockSize
+        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
+        arrHolder = SharedArrayHolder[byte](
+          data: resData,
+          size: resDataSize
+        )
+
+      for i in 0..<parity.len:
+        copyMem(addr resData[i * args.blockSize], addr parity[i][0], args.blockSize)
+
+      return ok(arrHolder)
+    else:
+      return err(res.error)
+  except CatchableError as exception:
+    return err(exception.msg.cstring)
+  finally:
+    if err =? args.signal.fireSync().mapFailure.errorOption():
+      error "Error firing signal", msg = err.msg
+
+proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]): DecodeTaskResult =
+  var
+    data = data.unsafeAddr
+    parity = parity.unsafeAddr
+    recovered = newSeqWith[seq[byte]](args.ecK, newSeq[byte](args.blockSize))
+
+  try:
+    let res = args.backend[].decode(data[], parity[], recovered)
+
+    if res.isOk:
+      let
+        resDataSize = recovered.len * args.blockSize
+        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
+        arrHolder = SharedArrayHolder[byte](
+          data: resData,
+          size: resDataSize
+        )
+
+      for i in 0..<recovered.len:
+        copyMem(addr resData[i * args.blockSize], addr recovered[i][0], args.blockSize)
+
+      return ok(arrHolder)
+    else:
+      return err(res.error)
+  except CatchableError as exception:
+    return err(exception.msg.cstring)
+  finally:
+    if err =? args.signal.fireSync().mapFailure.errorOption():
+      error "Error firing signal", msg = err.msg
+
+proc proxySpawnEncodeTask(
+  tp: Taskpool,
+  args: EncodeTaskArgs,
+  data: ref seq[seq[byte]]
+): Flowvar[EncodeTaskResult] =
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn encodeTask(args, data[])
+
+  let fv = EncodeTaskResult.newFlowVar
+  fv.readyWith(encodeTask(args, data[]))
+  return fv
+
+proc proxySpawnDecodeTask(
+  tp: Taskpool,
+  args: DecodeTaskArgs,
+  data: ref seq[seq[byte]],
+  parity: ref seq[seq[byte]]
+): Flowvar[DecodeTaskResult] =
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn decodeTask(args, data[], parity[])
+
+  let fv = DecodeTaskResult.newFlowVar
+  fv.readyWith(decodeTask(args, data[], parity[]))
+  return fv
+
+proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
+  await wait(signal)
+
+  var
+    res: T
+    awaitTotal: Duration
+  while awaitTotal < CompletitionTimeout:
+    if handle.tryComplete(res):
+      return success(res)
+    else:
+      awaitTotal += CompletitionRetryDelay
+      await sleepAsync(CompletitionRetryDelay)
+
+  return failure("Task signaled finish but didn't return any result within " & $CompletitionTimeout)
+
+proc asyncEncode*(
+  tp: Taskpool,
+  backend: EncoderBackend,
+  data: ref seq[seq[byte]],
+  blockSize: int,
+  ecM: int
+): Future[?!ref seq[seq[byte]]] {.async.} =
+  without signal =? ThreadSignalPtr.new().mapFailure, err:
+    return failure(err)
+
+  try:
+    let
+      blockSize = data[0].len
+      args = EncodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM)
+      handle = proxySpawnEncodeTask(tp, args, data)
+
+    without res =? await awaitResult(signal, handle), err:
+      return failure(err)
+
+    if res.isOk:
+      var parity = seq[seq[byte]].new()
+      parity[].setLen(ecM)
+
+      for i in 0..<parity[].len:
+        parity[i] = newSeq[byte](blockSize)
+        copyMem(addr parity[i][0], addr res.value.data[i * blockSize], blockSize)
+
+      deallocShared(res.value.data)
+
+      return success(parity)
+    else:
+      return failure($res.error)
+  finally:
+    if err =? signal.close().mapFailure.errorOption():
+      error "Error closing signal", msg = $err.msg
+
+proc asyncDecode*(
+  tp: Taskpool,
+  backend: DecoderBackend,
+  data, parity: ref seq[seq[byte]],
+  blockSize: int
+): Future[?!ref seq[seq[byte]]] {.async.} =
+  without signal =? ThreadSignalPtr.new().mapFailure, err:
+    return failure(err)
+
+  try:
+    let
+      ecK = data[].len
+      args = DecodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK)
+      handle = proxySpawnDecodeTask(tp, args, data, parity)
+
+    without res =? await awaitResult(signal, handle), err:
+      return failure(err)
+
+    if res.isOk:
+      var recovered = seq[seq[byte]].new()
+      recovered[].setLen(ecK)
+
+      for i in 0..<recovered[].len:
+        recovered[i] = newSeq[byte](blockSize)
+        copyMem(addr recovered[i][0], addr res.value.data[i * blockSize], blockSize)
+
+      deallocShared(res.value.data)
+
+      return success(recovered)
+    else:
+      return failure($res.error)
+  finally:
+    if err =? signal.close().mapFailure.errorOption():
+      error "Error closing signal", msg = $err.msg
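
Usage sketch for the off-thread wrappers above. The taskpool size and buffer shapes are example values; `without`/`?!` come from questionable, and the backend would be a Leopard encoder from the erasure module:

import std/sequtils
import pkg/taskpools
import pkg/chronos
import pkg/questionable/results

proc exampleEncode(backend: EncoderBackend) {.async.} =
  let tp = Taskpool.new(numThreads = 2)
  var data = seq[seq[byte]].new()
  data[] = newSeqWith(4, newSeq[byte](64))   # K = 4 blocks of 64 bytes each
  without parity =? await asyncEncode(tp, backend, data, 64, 2), err:
    echo "encode failed: ", err.msg
    return
  doAssert parity[].len == 2                 # M = 2 parity blocks
  tp.shutdown()
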
@@ -11,30 +11,37 @@ import pkg/upraises

 push: {.upraises: [].}

-import ../manifest
 import ../stores

 type
-  Backend* = ref object of RootObj
+  ErasureBackend* = ref object of RootObj
     blockSize*: int # block size in bytes
     buffers*: int   # number of original pieces
     parity*: int    # number of redundancy pieces

-  EncoderBackend* = ref object of Backend
-  DecoderBackend* = ref object of Backend
+  EncoderBackend* = ref object of ErasureBackend
+  DecoderBackend* = ref object of ErasureBackend

-method release*(self: Backend) {.base.} =
+method release*(self: ErasureBackend) {.base.} =
+  ## release the backend
+  ##
   raiseAssert("not implemented!")

 method encode*(
   self: EncoderBackend,
   buffers,
-  parity: var openArray[seq[byte]]): Result[void, cstring] {.base.} =
+  parity: var openArray[seq[byte]]
+): Result[void, cstring] {.base.} =
+  ## encode buffers using a backend
+  ##
   raiseAssert("not implemented!")

 method decode*(
   self: DecoderBackend,
   buffers,
   parity,
-  recovered: var openArray[seq[byte]]): Result[void, cstring] {.base.} =
+  recovered: var openArray[seq[byte]]
+): Result[void, cstring] {.base.} =
+  ## decode buffers using a backend
+  ##
   raiseAssert("not implemented!")
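
Because the base methods above raise by default, a concrete backend only overrides what it supports. A deliberately fake backend for illustration (it repeats the first buffer instead of computing real parity; `Result`/`ok` assumed from stew/results):

import pkg/stew/results

type MirrorEncoder = ref object of EncoderBackend

method encode*(
  self: MirrorEncoder,
  buffers,
  parity: var openArray[seq[byte]]
): Result[void, cstring] =
  # not real erasure coding: copy buffer 0 into every parity slot
  for p in parity.mitems:
    p = buffers[0]
  ok()
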
@@ -25,6 +25,7 @@ method encode*(
   self: LeoEncoderBackend,
   data,
   parity: var openArray[seq[byte]]): Result[void, cstring] =
+  ## Encode data using Leopard backend

   if parity.len == 0:
     return ok()

@@ -45,8 +46,10 @@ method decode*(
   data,
   parity,
   recovered: var openArray[seq[byte]]): Result[void, cstring] =
+  ## Decode data using given Leopard backend

-  var decoder = if self.decoder.isNone:
+  var decoder =
+    if self.decoder.isNone:
       self.decoder = (? LeoDecoder.init(
         self.blockSize,
         self.buffers,

@@ -65,22 +68,26 @@ method release*(self: LeoDecoderBackend) =
   if self.decoder.isSome:
     self.decoder.get().free()

-func new*(
+proc new*(
   T: type LeoEncoderBackend,
   blockSize,
   buffers,
-  parity: int): T =
-  T(
+  parity: int): LeoEncoderBackend =
+  ## Create an instance of a Leopard Encoder backend
+  ##
+  LeoEncoderBackend(
     blockSize: blockSize,
     buffers: buffers,
     parity: parity)

-func new*(
+proc new*(
   T: type LeoDecoderBackend,
   blockSize,
   buffers,
-  parity: int): T =
-  T(
+  parity: int): LeoDecoderBackend =
+  ## Create an instance of a Leopard Decoder backend
+  ##
+  LeoDecoderBackend(
     blockSize: blockSize,
     buffers: buffers,
     parity: parity)
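
Recovery sketch for the decoder above. This assumes, as the erasure call sites elsewhere in this change do, that an empty seq marks an erased buffer; sizes are illustrative:

import std/sequtils

const K = 4; const M = 2; const blockSize = 64
var
  data      = newSeqWith(K, newSeq[byte](blockSize))
  parity    = newSeqWith(M, newSeq[byte](blockSize))
  recovered = newSeqWith(K, newSeq[byte](blockSize))
let enc = LeoEncoderBackend.new(blockSize, K, M)
let dec = LeoDecoderBackend.new(blockSize, K, M)
doAssert enc.encode(data, parity).isOk
data[1] = @[]                                # simulate a lost block
doAssert dec.decode(data, parity, recovered).isOk
enc.release()
dec.release()
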
@ -12,16 +12,27 @@ import pkg/upraises
|
||||||
push: {.upraises: [].}
|
push: {.upraises: [].}
|
||||||
|
|
||||||
import std/sequtils
|
import std/sequtils
|
||||||
|
import std/sugar
|
||||||
|
|
||||||
import pkg/chronos
|
import pkg/chronos
|
||||||
import pkg/chronicles
|
import pkg/libp2p/[multicodec, cid, multihash]
|
||||||
|
import pkg/libp2p/protobuf/minprotobuf
|
||||||
|
import pkg/taskpools
|
||||||
|
|
||||||
|
import ../logutils
|
||||||
import ../manifest
|
import ../manifest
|
||||||
|
import ../merkletree
|
||||||
import ../stores
|
import ../stores
|
||||||
import ../errors
|
|
||||||
import ../blocktype as bt
|
import ../blocktype as bt
|
||||||
|
import ../utils
|
||||||
|
import ../utils/asynciter
|
||||||
|
import ../indexingstrategy
|
||||||
|
import ../errors
|
||||||
|
|
||||||
|
import pkg/stew/byteutils
|
||||||
|
|
||||||
import ./backend
|
import ./backend
|
||||||
|
import ./asyncbackend
|
||||||
|
|
||||||
export backend
|
export backend
|
||||||
|
|
||||||
|
@ -62,86 +73,275 @@ type
|
||||||
encoderProvider*: EncoderProvider
|
encoderProvider*: EncoderProvider
|
||||||
decoderProvider*: DecoderProvider
|
decoderProvider*: DecoderProvider
|
||||||
store*: BlockStore
|
store*: BlockStore
|
||||||
|
taskpool: Taskpool
|
||||||
|
|
||||||
proc encode*(
|
EncodingParams = object
|
||||||
|
ecK: Natural
|
||||||
|
ecM: Natural
|
||||||
|
rounded: Natural
|
||||||
|
steps: Natural
|
||||||
|
blocksCount: Natural
|
||||||
|
strategy: StrategyType
|
||||||
|
|
||||||
|
ErasureError* = object of CodexError
|
||||||
|
InsufficientBlocksError* = object of ErasureError
|
||||||
|
# Minimum size, in bytes, that the dataset must have had
|
||||||
|
# for the encoding request to have succeeded with the parameters
|
||||||
|
# provided.
|
||||||
|
minSize*: NBytes
|
||||||
|
|
||||||
|
func indexToPos(steps, idx, step: int): int {.inline.} =
|
||||||
|
## Convert an index to a position in the encoded
|
||||||
|
## dataset
|
||||||
|
## `idx` - the index to convert
|
||||||
|
## `step` - the current step
|
||||||
|
## `pos` - the position in the encoded dataset
|
||||||
|
##
|
||||||
|
|
||||||
|
(idx - step) div steps
|
||||||
|
|
||||||
|
proc getPendingBlocks(
|
||||||
self: Erasure,
|
self: Erasure,
|
||||||
manifest: Manifest,
|
manifest: Manifest,
|
||||||
blocks: int,
|
indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
|
||||||
parity: int): Future[?!Manifest] {.async.} =
|
## Get pending blocks iterator
|
||||||
## Encode a manifest into one that is erasure protected.
|
|
||||||
##
|
##
|
||||||
## `manifest` - the original manifest to be encoded
|
|
||||||
## `blocks` - the number of blocks to be encoded - K
|
|
||||||
## `parity` - the number of parity blocks to generate - M
|
|
||||||
##
|
|
||||||
|
|
||||||
logScope:
|
|
||||||
original_cid = manifest.cid.get()
|
|
||||||
original_len = manifest.len
|
|
||||||
blocks = blocks
|
|
||||||
parity = parity
|
|
||||||
|
|
||||||
trace "Erasure coding manifest", blocks, parity
|
|
||||||
without var encoded =? Manifest.new(manifest, blocks, parity), error:
|
|
||||||
trace "Unable to create manifest", msg = error.msg
|
|
||||||
return error.failure
|
|
||||||
|
|
||||||
logScope:
|
|
||||||
steps = encoded.steps
|
|
||||||
rounded_blocks = encoded.rounded
|
|
||||||
new_manifest = encoded.len
|
|
||||||
|
|
||||||
var
|
var
|
||||||
encoder = self.encoderProvider(manifest.blockSize, blocks, parity)
|
# request blocks from the store
|
||||||
|
pendingBlocks = indicies.map( (i: int) =>
|
||||||
|
self.store.getBlock(
|
||||||
|
BlockAddress.init(manifest.treeCid, i)
|
||||||
|
).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
|
||||||
|
)
|
||||||
|
|
||||||
|
proc isFinished(): bool = pendingBlocks.len == 0
|
||||||
|
|
||||||
|
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
|
||||||
|
let completedFut = await one(pendingBlocks)
|
||||||
|
if (let i = pendingBlocks.find(completedFut); i >= 0):
|
||||||
|
pendingBlocks.del(i)
|
||||||
|
return await completedFut
|
||||||
|
else:
|
||||||
|
let (_, index) = await completedFut
|
||||||
|
raise newException(
|
||||||
|
CatchableError,
|
||||||
|
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
|
||||||
|
|
||||||
|
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
|
||||||
|
|
||||||
|
proc prepareEncodingData(
|
||||||
|
self: Erasure,
|
||||||
|
manifest: Manifest,
|
||||||
|
params: EncodingParams,
|
||||||
|
step: Natural,
|
||||||
|
data: ref seq[seq[byte]],
|
||||||
|
cids: ref seq[Cid],
|
||||||
|
emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
|
||||||
|
## Prepare data for encoding
|
||||||
|
##
|
||||||
|
|
||||||
|
let
|
||||||
|
strategy = params.strategy.init(
|
||||||
|
firstIndex = 0,
|
||||||
|
lastIndex = params.rounded - 1,
|
||||||
|
iterations = params.steps
|
||||||
|
)
|
||||||
|
indicies = toSeq(strategy.getIndicies(step))
|
||||||
|
pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
|
||||||
|
|
||||||
|
var resolved = 0
|
||||||
|
for fut in pendingBlocksIter:
|
||||||
|
let (blkOrErr, idx) = await fut
|
||||||
|
without blk =? blkOrErr, err:
|
||||||
|
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
|
||||||
|
continue
|
||||||
|
|
||||||
|
let pos = indexToPos(params.steps, idx, step)
|
||||||
|
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
|
||||||
|
cids[idx] = blk.cid
|
||||||
|
|
||||||
|
resolved.inc()
|
||||||
|
|
||||||
|
for idx in indicies.filterIt(it >= manifest.blocksCount):
|
||||||
|
let pos = indexToPos(params.steps, idx, step)
|
||||||
|
trace "Padding with empty block", idx
|
||||||
|
shallowCopy(data[pos], emptyBlock)
|
||||||
|
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
|
||||||
|
return failure(err)
|
||||||
|
cids[idx] = emptyBlockCid
|
||||||
|
|
||||||
|
success(resolved.Natural)
|
||||||
|
|
||||||
|
proc prepareDecodingData(
|
||||||
|
self: Erasure,
|
||||||
|
encoded: Manifest,
|
||||||
|
step: Natural,
|
||||||
|
data: ref seq[seq[byte]],
|
||||||
|
parityData: ref seq[seq[byte]],
|
||||||
|
cids: ref seq[Cid],
|
||||||
|
emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
|
||||||
|
## Prepare data for decoding
|
||||||
|
## `encoded` - the encoded manifest
|
||||||
|
## `step` - the current step
|
||||||
|
## `data` - the data to be prepared
|
||||||
|
## `parityData` - the parityData to be prepared
|
||||||
|
## `cids` - cids of prepared data
|
||||||
|
## `emptyBlock` - the empty block to be used for padding
|
||||||
|
##
|
||||||
|
|
||||||
|
let
|
||||||
|
strategy = encoded.protectedStrategy.init(
|
||||||
|
firstIndex = 0,
|
||||||
|
lastIndex = encoded.blocksCount - 1,
|
||||||
|
iterations = encoded.steps
|
||||||
|
)
|
||||||
|
indicies = toSeq(strategy.getIndicies(step))
|
||||||
|
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
|
||||||
|
|
||||||
|
var
|
||||||
|
dataPieces = 0
|
||||||
|
parityPieces = 0
|
||||||
|
resolved = 0
|
||||||
|
for fut in pendingBlocksIter:
|
||||||
|
# Continue to receive blocks until we have just enough for decoding
|
||||||
|
# or no more blocks can arrive
|
||||||
|
if resolved >= encoded.ecK:
|
||||||
|
break
|
||||||
|
|
||||||
|
let (blkOrErr, idx) = await fut
|
||||||
|
without blk =? blkOrErr, err:
|
||||||
|
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
|
||||||
|
continue
|
||||||
|
|
||||||
|
let
|
||||||
|
pos = indexToPos(encoded.steps, idx, step)
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
cid = blk.cid
|
||||||
|
idx = idx
|
||||||
|
pos = pos
|
||||||
|
step = step
|
||||||
|
empty = blk.isEmpty
|
||||||
|
|
||||||
|
cids[idx] = blk.cid
|
||||||
|
if idx >= encoded.rounded:
|
||||||
|
trace "Retrieved parity block"
|
||||||
|
shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
|
||||||
|
parityPieces.inc
|
||||||
|
else:
|
||||||
|
trace "Retrieved data block"
|
||||||
|
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
|
||||||
|
dataPieces.inc
|
||||||
|
|
||||||
|
resolved.inc
|
||||||
|
|
||||||
|
return success (dataPieces.Natural, parityPieces.Natural)
|
||||||
|
|
||||||
|
proc init*(
|
||||||
|
_: type EncodingParams,
|
||||||
|
manifest: Manifest,
|
||||||
|
ecK: Natural, ecM: Natural,
|
||||||
|
strategy: StrategyType): ?!EncodingParams =
|
||||||
|
if ecK > manifest.blocksCount:
|
||||||
|
let exc = (ref InsufficientBlocksError)(
|
||||||
|
msg: "Unable to encode manifest, not enough blocks, ecK = " &
|
||||||
|
$ecK &
|
||||||
|
", blocksCount = " &
|
||||||
|
$manifest.blocksCount,
|
||||||
|
minSize: ecK.NBytes * manifest.blockSize)
|
||||||
|
return failure(exc)
|
||||||
|
|
||||||
|
let
|
||||||
|
rounded = roundUp(manifest.blocksCount, ecK)
|
||||||
|
steps = divUp(rounded, ecK)
|
||||||
|
blocksCount = rounded + (steps * ecM)
|
||||||
|
|
||||||
|
success EncodingParams(
|
||||||
|
ecK: ecK,
|
||||||
|
ecM: ecM,
|
||||||
|
rounded: rounded,
|
||||||
|
steps: steps,
|
||||||
|
blocksCount: blocksCount,
|
||||||
|
strategy: strategy
|
||||||
|
)
|
||||||
|
|
||||||
|
proc encodeData(
|
||||||
|
self: Erasure,
|
||||||
|
manifest: Manifest,
|
||||||
|
params: EncodingParams
|
||||||
|
): Future[?!Manifest] {.async.} =
|
||||||
|
## Encode blocks pointed to by the protected manifest
|
||||||
|
##
|
||||||
|
## `manifest` - the manifest to encode
|
||||||
|
##
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
steps = params.steps
|
||||||
|
rounded_blocks = params.rounded
|
||||||
|
blocks_count = params.blocksCount
|
||||||
|
ecK = params.ecK
|
||||||
|
ecM = params.ecM
|
||||||
|
|
||||||
|
var
|
||||||
|
cids = seq[Cid].new()
|
||||||
|
encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
|
||||||
|
emptyBlock = newSeq[byte](manifest.blockSize.int)
|
||||||
|
|
||||||
|
cids[].setLen(params.blocksCount)
|
||||||
|
|
||||||
  try:
    for step in 0..<params.steps:
      # TODO: Don't allocate a new seq every time, allocate once and zero out
      var
        data = seq[seq[byte]].new() # number of blocks to encode

      data[].setLen(params.ecK)

      without resolved =?
        (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      trace "Erasure coding data", data = data[].len, parity = params.ecM

      without parity =? await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err:
        trace "Error encoding data", err = err.msg
        return failure(err)

      var idx = params.rounded + step
      for j in 0..<params.ecM:
        without blk =? bt.Block.new(parity[j]), error:
          trace "Unable to create parity block", err = error.msg
          return failure(error)

        trace "Adding parity block", cid = blk.cid, idx
        cids[idx] = blk.cid
        if isErr (await self.store.putBlock(blk)):
          trace "Unable to store block!", cid = blk.cid
          return failure("Unable to store block!")
        idx.inc(params.steps)

    without tree =? CodexTree.init(cids[]), err:
      return failure(err)

    without treeCid =? tree.rootCid, err:
      return failure(err)

    if err =? (await self.store.putAllProofs(tree)).errorOption:
      return failure(err)

    let encodedManifest = Manifest.new(
      manifest = manifest,
      treeCid = treeCid,
      datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
      ecK = params.ecK,
      ecM = params.ecM,
      strategy = params.strategy
    )

    trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
    success encodedManifest
  except CancelledError as exc:
    trace "Erasure coding encoding cancelled"
    raise exc # cancellation needs to be propagated
@@ -151,7 +351,26 @@ proc encode*(
  finally:
    encoder.release()

proc encode*(
  self: Erasure,
  manifest: Manifest,
  blocks: Natural,
  parity: Natural,
  strategy = SteppedStrategy): Future[?!Manifest] {.async.} =
  ## Encode a manifest into one that is erasure protected.
  ##
  ## `manifest` - the original manifest to be encoded
  ## `blocks`   - the number of blocks to be encoded - K
  ## `parity`   - the number of parity blocks to generate - M
  ##

  without params =? EncodingParams.init(manifest, blocks.int, parity.int, strategy), err:
    return failure(err)

  without encodedManifest =? await self.encodeData(manifest, params), err:
    return failure(err)

  return success encodedManifest

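For orientation, a minimal usage sketch of the new `encode` entry point; the proc and manifest names here are illustrative placeholders, not part of this diff:

```nim
# Hypothetical usage: protect a dataset with K = 2 data blocks and
# M = 1 parity block per EC group, using the default SteppedStrategy.
proc protectExample(erasure: Erasure, manifest: Manifest) {.async.} =
  without protected =? await erasure.encode(manifest, blocks = 2, parity = 1), err:
    echo "encoding failed: ", err.msg
    return
  # With SteppedStrategy, parity block j of EC group `step` lands at
  # index params.rounded + step + j * params.steps in the block list.
  echo "protected dataset holds ", protected.blocksCount, " blocks"
```
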
proc decode*(
  self: Erasure,
@@ -166,85 +385,53 @@ proc decode*(
  logScope:
    steps = encoded.steps
    rounded_blocks = encoded.rounded
    new_manifest = encoded.blocksCount

  var
    cids = seq[Cid].new()
    recoveredIndices = newSeq[Natural]()
    decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
    emptyBlock = newSeq[byte](encoded.blockSize.int)

  cids[].setLen(encoded.blocksCount)
  try:
    for step in 0..<encoded.steps:
      var
        data = seq[seq[byte]].new()
        parity = seq[seq[byte]].new()

      data[].setLen(encoded.ecK)   # set len to K
      parity[].setLen(encoded.ecM) # set len to M

      without (dataPieces, _) =?
        (await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      if dataPieces >= encoded.ecK:
        trace "Retrieved all the required data blocks"
        continue

      trace "Erasure decoding data"

      without recovered =? await asyncDecode(self.taskpool, decoder, data, parity, encoded.blockSize.int), err:
        trace "Error decoding data", err = err.msg
        return failure(err)

      for i in 0..<encoded.ecK:
        let idx = i * encoded.steps + step
        if data[i].len <= 0 and not cids[idx].isEmpty:
          without blk =? bt.Block.new(recovered[i]), error:
            trace "Unable to create block!", exc = error.msg
            return failure(error)

          trace "Recovered block", cid = blk.cid, index = i
          if isErr (await self.store.putBlock(blk)):
            trace "Unable to store block!", cid = blk.cid
            return failure("Unable to store block!")

          cids[idx] = blk.cid
          recoveredIndices.add(idx)
  except CancelledError as exc:
    trace "Erasure coding decoding cancelled"
    raise exc # cancellation needs to be propagated
@@ -254,8 +441,22 @@ proc decode*(
  finally:
    decoder.release()

  without tree =? CodexTree.init(cids[0..<encoded.originalBlocksCount]), err:
    return failure(err)

  without treeCid =? tree.rootCid, err:
    return failure(err)

  if treeCid != encoded.originalTreeCid:
    return failure("Original tree root differs from the tree root computed out of recovered data")

  let idxIter = Iter[Natural].new(recoveredIndices)
    .filter((i: Natural) => i < tree.leavesCount)

  if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
    return failure(err)

  let decoded = Manifest.new(encoded)

  return decoded.success

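The index arithmetic above maps each decoded EC-group slot back to a dataset position. A self-contained illustration with made-up parameters:

```nim
# Illustrative only: with ecK = 2 and steps = 3 (SteppedStrategy),
# data piece i of EC group `step` sits at index i * steps + step:
#   step 0 -> 0, 3    step 1 -> 1, 4    step 2 -> 2, 5
for step in 0..<3:
  for i in 0..<2:
    assert i * 3 + step == [[0, 3], [1, 4], [2, 5]][step][i]
```
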
@@ -269,9 +470,13 @@ proc new*(
  T: type Erasure,
  store: BlockStore,
  encoderProvider: EncoderProvider,
  decoderProvider: DecoderProvider,
  taskpool: Taskpool): Erasure =
  ## Create a new Erasure instance for encoding and decoding manifests
  ##

  Erasure(
    store: store,
    encoderProvider: encoderProvider,
    decoderProvider: decoderProvider,
    taskpool: taskpool)

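A sketch of how callers would wire this up; the leopard-backed provider names and the `Taskpool` import are assumptions about the surrounding codebase, not introduced by this hunk:

```nim
# Hypothetical wiring, assuming the leopard providers from the erasure
# backends and pkg/taskpools are available.
let erasure = Erasure.new(
  store,              # some BlockStore
  leoEncoderProvider, # EncoderProvider
  leoDecoderProvider, # DecoderProvider
  Taskpool.new())     # worker threads for asyncEncode/asyncDecode
```
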
@@ -7,16 +7,43 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/options

import pkg/stew/results
import pkg/chronos
import pkg/questionable/results

export results

type
  CodexError* = object of CatchableError # base codex error
  CodexResult*[T] = Result[T, ref CodexError]

template mapFailure*[T, V, E](
  exp: Result[T, V],
  exc: typedesc[E],
): Result[T, ref CatchableError] =
  ## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
  ##

  exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))

template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
  mapFailure(exp, CodexError)

# TODO: using a template here, causes bad codegen
func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
  if exp.isSome:
    success exp.get
  else:
    T.failure("Option is None")

proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
  try:
    await allFuturesThrowing(fut)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    return failure(exc.msg)

  return success()

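A brief sketch of the two conversions declared above (values are made up):

```nim
# Illustrative only: lifting foreign Result/Option errors into
# ref CatchableError results via mapFailure/toFailure.
import std/options
import pkg/stew/results

let parsed: Result[int, string] = err("bad input")
let lifted = parsed.mapFailure() # Result[int, ref CatchableError], CodexError msg
assert lifted.isErr and lifted.error.msg == "bad input"

assert 42.some.toFailure.get == 42
```
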
@@ -1,28 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/strutils

import pkg/chronicles
import pkg/libp2p

func shortLog*(cid: Cid): string =
  ## Returns compact string representation of ``cid``.
  var scid = $cid
  if len(scid) > 10:
    scid[3] = '*'

    when (NimMajor, NimMinor) > (1, 4):
      scid.delete(4 .. scid.high - 6)
    else:
      scid.delete(4, scid.high - 6)

  scid

chronicles.formatIt(Cid): shortLog(it)
@@ -0,0 +1,97 @@
import ./errors
import ./utils
import ./utils/asynciter

{.push raises: [].}

type
  StrategyType* = enum
    # Simplest approach:
    # 0 => 0, 1, 2
    # 1 => 3, 4, 5
    # 2 => 6, 7, 8
    LinearStrategy,

    # Stepped indexing:
    # 0 => 0, 3, 6
    # 1 => 1, 4, 7
    # 2 => 2, 5, 8
    SteppedStrategy

  # Represents a strategy for grouping indices (of blocks, usually).
  # Given an iteration count as input, it will produce a seq of
  # selected indices.
  IndexingError* = object of CodexError
  IndexingWrongIndexError* = object of IndexingError
  IndexingWrongIterationsError* = object of IndexingError

  IndexingStrategy* = object
    strategyType*: StrategyType
    firstIndex*: int  # Lowest index that can be returned
    lastIndex*: int   # Highest index that can be returned
    iterations*: int  # getIndicies(iteration) will run from 0 ..< iterations
    step*: int

func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
  if iteration >= self.iterations:
    raise newException(
      IndexingError,
      "Indexing iteration can't be greater than or equal to iterations.")

func getIter(first, last, step: int): Iter[int] =
  {.cast(noSideEffect).}:
    Iter[int].new(first, last, step)

func getLinearIndicies(
  self: IndexingStrategy,
  iteration: int): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)

  let
    first = self.firstIndex + iteration * self.step
    last = min(first + self.step - 1, self.lastIndex)

  getIter(first, last, 1)

func getSteppedIndicies(
  self: IndexingStrategy,
  iteration: int): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)

  let
    first = self.firstIndex + iteration
    last = self.lastIndex

  getIter(first, last, self.iterations)

func getIndicies*(
  self: IndexingStrategy,
  iteration: int): Iter[int] {.raises: [IndexingError].} =

  case self.strategyType
  of StrategyType.LinearStrategy:
    self.getLinearIndicies(iteration)
  of StrategyType.SteppedStrategy:
    self.getSteppedIndicies(iteration)

func init*(
  strategy: StrategyType,
  firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =

  if firstIndex > lastIndex:
    raise newException(
      IndexingWrongIndexError,
      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")

  if iterations <= 0:
    raise newException(
      IndexingWrongIterationsError,
      "iterations (" & $iterations & ") must be greater than zero.")

  IndexingStrategy(
    strategyType: strategy,
    firstIndex: firstIndex,
    lastIndex: lastIndex,
    iterations: iterations,
    step: divUp((lastIndex - firstIndex + 1), iterations))
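The two strategies in action, assuming `Iter` from utils/asynciter exposes an `items` iterator so `toSeq` can drain it:

```nim
# Illustrative only: grouping indices 0..8 into 3 iterations
# (step = divUp(9, 3) = 3).
import std/sequtils

let
  linear = LinearStrategy.init(0, 8, 3)
  stepped = SteppedStrategy.init(0, 8, 3)

assert toSeq(linear.getIndicies(1)) == @[3, 4, 5]  # contiguous runs
assert toSeq(stepped.getIndicies(1)) == @[1, 4, 7] # strided by iterations
```
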
@@ -0,0 +1,242 @@
## logutils is a module that has several goals:
## 1. Fix json logging output (run with `--log-format=json`) which was
##    effectively broken for many types using default Chronicles json
##    serialization.
## 2. Ability to specify log output for textlines and json sinks together or
##    separately
##    - This is useful if consuming json in some kind of log parser and need
##      valid json with real values
##    - eg a shortened Cid is nice to see in a text log in stdout, but won't
##      provide a real Cid when parsed in json
## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
##    conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
##
## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output
## need to be different, overload `formatIt` and specify a `LogFormat`. If json
## serialization is needed, it can be declared with a `%` proc. `logutils`
## imports and exports `nim-serde` which handles the de/serialization, examples
## below. **Only `codex/logutils` needs to be imported.**
##
## Using `logutils` in the Codex codebase:
## - Instead of importing `pkg/chronicles`, import `pkg/codex/logutils`
##   - most of `chronicles` is exported by `logutils`
## - Instead of importing `std/json`, import `pkg/serde/json`
##   - `std/json` is exported by `serde` which is exported by `logutils`
## - Instead of importing `pkg/nim-json-serialization`, import
##   `pkg/serde/json` or use codex-specific overloads by importing `utils/json`
##   - one of the goals is to remove the use of `nim-json-serialization`
##
## ```nim
## import pkg/codex/logutils
##
## type
##   BlockAddress* = object
##     case leaf*: bool
##     of true:
##       treeCid* {.serialize.}: Cid
##       index* {.serialize.}: Natural
##     else:
##       cid* {.serialize.}: Cid
##
## logutils.formatIt(LogFormat.textLines, BlockAddress):
##   if it.leaf:
##     "treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
##   else:
##     "cid: " & shortLog($it.cid)
##
## logutils.formatIt(LogFormat.json, BlockAddress): %it
##
## # chronicles textlines output
## TRC test tid=14397405 ba="treeCid: zb2*fndjU1, index: 0"
## # chronicles json output
## {"lvl":"TRC","msg":"test","tid":14397405,"ba":{"treeCid":"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1","index":0}}
## ```
## In this case, `BlockAddress` is just an object, so `nim-serde` can handle
## serializing it without issue (only fields annotated with `{.serialize.}` will
## serialize (aka opt-in serialization)).
##
## If one so wished, another option for the textlines log output, would be to
## simply `toString` the serialised json:
## ```nim
## logutils.formatIt(LogFormat.textLines, BlockAddress): $ %it
## # or, more succinctly:
## logutils.formatIt(LogFormat.textLines, BlockAddress): it.toJson
## ```
## In that case, both the textlines and json sinks would have the same output,
## so we could reduce this even further by not specifying a `LogFormat`:
## ```nim
## type
##   BlockAddress* = object
##     case leaf*: bool
##     of true:
##       treeCid* {.serialize.}: Cid
##       index* {.serialize.}: Natural
##     else:
##       cid* {.serialize.}: Cid
##
## logutils.formatIt(BlockAddress): %it
##
## # chronicles textlines output
## TRC test tid=14400673 ba="{\"treeCid\":\"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1\",\"index\":0}"
## # chronicles json output
## {"lvl":"TRC","msg":"test","tid":14400673,"ba":{"treeCid":"zb2rhgsDE16rLtbwTFeNKbdSobtKiWdjJPvKEuPgrQAfndjU1","index":0}}
## ```

import std/options
import std/sequtils
import std/strutils
import std/sugar
import std/typetraits

import pkg/chronicles except toJson, `%`
from pkg/libp2p import Cid, MultiAddress, `$`
import pkg/questionable
import pkg/questionable/results
import ./utils/json except formatIt # TODO: remove exception?
import pkg/stew/byteutils
import pkg/stint
import pkg/upraises

export byteutils
export chronicles except toJson, formatIt, `%`
export questionable
export sequtils
export json except formatIt
export strutils
export sugar
export upraises
export results

func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
  ## Returns compact string representation of ``long``.
  var short = long
  let minLen = start + ellipses.len + stop
  if len(short) > minLen:
    short.insert(ellipses, start)

    when (NimMajor, NimMinor) > (1, 4):
      short.delete(start + ellipses.len .. short.high - stop)
    else:
      short.delete(start + ellipses.len, short.high - stop)

  short

func shortHexLog*(long: string): string =
  if long[0..1] == "0x": result &= "0x"
  result &= long[2..long.high].shortLog("..", 4, 4)

func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =
  v.to0xHex.shortHexLog

func short0xHexLog*[T: distinct](v: T): string =
  type BaseType = T.distinctBase
  BaseType(v).short0xHexLog

func short0xHexLog*[U: distinct, T: seq[U]](v: T): string =
  type BaseType = U.distinctBase
  "@[" & v.map(x => BaseType(x).short0xHexLog).join(",") & "]"

func to0xHexLog*[T: distinct](v: T): string =
  type BaseType = T.distinctBase
  BaseType(v).to0xHex

func to0xHexLog*[U: distinct, T: seq[U]](v: T): string =
  type BaseType = U.distinctBase
  "@[" & v.map(x => BaseType(x).to0xHex).join(",") & "]"

proc formatTextLineSeq*(val: seq[string]): string =
  "@[" & val.join(", ") & "]"

template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
  # Provides formatters for logging with Chronicles for the given type and
  # `LogFormat`.
  # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overridden
  # since the base `setProperty` is generic using `auto` and conflicts with
  # providing a generic `seq` and `Option` override.
  when format == LogFormat.json:
    proc formatJsonOption(val: ?T): JsonNode =
      if it =? val:
        json.`%`(body)
      else:
        newJNull()

    proc formatJsonResult*(val: ?!T): JsonNode =
      without it =? val, error:
        let jObj = newJObject()
        jObj["error"] = newJString(error.msg)
        return jObj
      json.`%`(body)

    proc setProperty*(r: var JsonRecord, key: string, res: ?!T) =
      var it {.inject, used.}: T
      setProperty(r, key, res.formatJsonResult)

    proc setProperty*(r: var JsonRecord, key: string, opt: ?T) =
      var it {.inject, used.}: T
      let v = opt.formatJsonOption
      setProperty(r, key, v)

    proc setProperty*(r: var JsonRecord, key: string, opts: seq[?T]) =
      var it {.inject, used.}: T
      let v = opts.map(opt => opt.formatJsonOption)
      setProperty(r, key, json.`%`(v))

    proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) =
      var it {.inject, used.}: T
      let v = val.map(it => body)
      setProperty(r, key, json.`%`(v))

    proc setProperty*(r: var JsonRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
      var it {.inject, used.}: T = val
      let v = body
      setProperty(r, key, json.`%`(v))

  elif format == LogFormat.textLines:
    proc formatTextLineOption*(val: ?T): string =
      var v = "none(" & $T & ")"
      if it =? val:
        v = "some(" & $(body) & ")" # that I used to know :)
      v

    proc formatTextLineResult*(val: ?!T): string =
      without it =? val, error:
        return "Error: " & error.msg
      $(body)

    proc setProperty*(r: var TextLineRecord, key: string, res: ?!T) =
      var it {.inject, used.}: T
      setProperty(r, key, res.formatTextLineResult)

    proc setProperty*(r: var TextLineRecord, key: string, opt: ?T) =
      var it {.inject, used.}: T
      let v = opt.formatTextLineOption
      setProperty(r, key, v)

    proc setProperty*(r: var TextLineRecord, key: string, opts: seq[?T]) =
      var it {.inject, used.}: T
      let v = opts.map(opt => opt.formatTextLineOption)
      setProperty(r, key, v.formatTextLineSeq)

    proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) =
      var it {.inject, used.}: T
      let v = val.map(it => body)
      setProperty(r, key, v.formatTextLineSeq)

    proc setProperty*(r: var TextLineRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
      var it {.inject, used.}: T = val
      let v = body
      setProperty(r, key, v)

template formatIt*(T: type, body: untyped) {.dirty.} =
  formatIt(LogFormat.textLines, T): body
  formatIt(LogFormat.json, T): body

formatIt(LogFormat.textLines, Cid): shortLog($it)
formatIt(LogFormat.json, Cid): $it
formatIt(UInt256): $it
formatIt(MultiAddress): $it
formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]): it.to0xHex
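A quick illustration of the single-body pattern for a new type; `Peer` is a hypothetical type and `trace` assumes a configured Chronicles sink:

```nim
import pkg/codex/logutils

type Peer = object
  id: string

# One body serves both sinks: each gets the shortened id string.
formatIt(Peer): it.id.shortLog()

let peer = Peer(id: "12D3KooWAbCdEfGh")
trace "connected", peer # textlines: peer="12D*CdEfGh"
```
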
@@ -1,5 +1,4 @@
import ./manifest/coders
import ./manifest/manifest

export manifest, coders

@@ -14,19 +14,20 @@ import pkg/upraises
push: {.upraises: [].}

import std/tables
import std/sequtils

import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import pkg/chronos

import ./manifest
import ../errors
import ../blocktype
import ../logutils
import ../indexingstrategy

proc encode*(manifest: Manifest): ?!seq[byte] =
  ## Encode the manifest into a ``ManifestCodec``
  ## multicodec container (Dag-pb) for now
  ##
@@ -34,54 +35,67 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
  ? manifest.verify()
  var pbNode = initProtoBuffer()

  # NOTE: The `Data` field in the `dag-pb`
  # contains the following protobuf `Message`
  #
  # ```protobuf
  # Message VerificationInfo {
  #   bytes verifyRoot = 1;         # Decimal encoded field-element
  #   repeated bytes slotRoots = 2; # Decimal encoded field-elements
  # }
  # Message ErasureInfo {
  #   optional uint32 ecK = 1;                        # number of encoded blocks
  #   optional uint32 ecM = 2;                        # number of parity blocks
  #   optional bytes originalTreeCid = 3;             # cid of the original dataset
  #   optional uint32 originalDatasetSize = 4;        # size of the original dataset
  #   optional VerificationInfo verification = 5;     # verification information
  # }
  #
  # Message Header {
  #   optional bytes treeCid = 1;       # cid (root) of the tree
  #   optional uint32 blockSize = 2;    # size of a single block
  #   optional uint64 datasetSize = 3;  # size of the dataset
  #   optional codec: MultiCodec = 4;   # Dataset codec
  #   optional hcodec: MultiCodec = 5   # Multihash codec
  #   optional version: CidVersion = 6; # Cid version
  #   optional ErasureInfo erasure = 7; # erasure coding info
  # }
  # ```
  #
  # var treeRootVBuf = initVBuffer()
  var header = initProtoBuffer()
  header.write(1, manifest.treeCid.data.buffer)
  header.write(2, manifest.blockSize.uint32)
  header.write(3, manifest.datasetSize.uint64)
  header.write(4, manifest.codec.uint32)
  header.write(5, manifest.hcodec.uint32)
  header.write(6, manifest.version.uint32)
  if manifest.protected:
    var erasureInfo = initProtoBuffer()
    erasureInfo.write(1, manifest.ecK.uint32)
    erasureInfo.write(2, manifest.ecM.uint32)
    erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
    erasureInfo.write(4, manifest.originalDatasetSize.uint64)
    erasureInfo.write(5, manifest.protectedStrategy.uint32)

    if manifest.verifiable:
      var verificationInfo = initProtoBuffer()
      verificationInfo.write(1, manifest.verifyRoot.data.buffer)
      for slotRoot in manifest.slotRoots:
        verificationInfo.write(2, slotRoot.data.buffer)
      verificationInfo.write(3, manifest.cellSize.uint32)
      verificationInfo.write(4, manifest.verifiableStrategy.uint32)
      erasureInfo.write(6, verificationInfo)

    erasureInfo.finish()
    header.write(7, erasureInfo)

  pbNode.write(1, header) # set the treeCid as the data field
  pbNode.finish()

  return pbNode.buffer.success

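A round-trip sketch over the encode/decode pair in this file (the wrapper proc is illustrative):

```nim
# Illustrative only: serialize a manifest to dag-pb bytes and back.
proc roundTrip(manifest: Manifest): ?!void =
  without bytes =? manifest.encode(), err:
    return failure(err)
  without decoded =? Manifest.decode(bytes), err:
    return failure(err)
  assert decoded == manifest # field-wise `==`, including erasure info
  success()
```
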
proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
  ## Decode a manifest from a data blob
  ##

@@ -89,105 +103,131 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
    pbNode = initProtoBuffer(data)
    pbHeader: ProtoBuffer
    pbErasureInfo: ProtoBuffer
    pbVerificationInfo: ProtoBuffer
    treeCidBuf: seq[byte]
    originalTreeCid: seq[byte]
    datasetSize: uint64
    codec: uint32
    hcodec: uint32
    version: uint32
    blockSize: uint32
    originalDatasetSize: uint64
    ecK, ecM: uint32
    protectedStrategy: uint32
    verifyRoot: seq[byte]
    slotRoots: seq[seq[byte]]
    cellSize: uint32
    verifiableStrategy: uint32

  # Decode `Header` message
  if pbNode.getField(1, pbHeader).isErr:
    return failure("Unable to decode `Header` from dag-pb manifest!")

  # Decode `Header` contents
  if pbHeader.getField(1, treeCidBuf).isErr:
    return failure("Unable to decode `treeCid` from manifest!")

  if pbHeader.getField(2, blockSize).isErr:
    return failure("Unable to decode `blockSize` from manifest!")

  if pbHeader.getField(3, datasetSize).isErr:
    return failure("Unable to decode `datasetSize` from manifest!")

  if pbHeader.getField(4, codec).isErr:
    return failure("Unable to decode `codec` from manifest!")

  if pbHeader.getField(5, hcodec).isErr:
    return failure("Unable to decode `hcodec` from manifest!")

  if pbHeader.getField(6, version).isErr:
    return failure("Unable to decode `version` from manifest!")

  if pbHeader.getField(7, pbErasureInfo).isErr:
    return failure("Unable to decode `erasureInfo` from manifest!")

  let protected = pbErasureInfo.buffer.len > 0
  var verifiable = false
  if protected:
    if pbErasureInfo.getField(1, ecK).isErr:
      return failure("Unable to decode `K` from manifest!")

    if pbErasureInfo.getField(2, ecM).isErr:
      return failure("Unable to decode `M` from manifest!")

    if pbErasureInfo.getField(3, originalTreeCid).isErr:
      return failure("Unable to decode `originalTreeCid` from manifest!")

    if pbErasureInfo.getField(4, originalDatasetSize).isErr:
      return failure("Unable to decode `originalDatasetSize` from manifest!")

    if pbErasureInfo.getField(5, protectedStrategy).isErr:
      return failure("Unable to decode `protectedStrategy` from manifest!")

    if pbErasureInfo.getField(6, pbVerificationInfo).isErr:
      return failure("Unable to decode `verificationInfo` from manifest!")

    verifiable = pbVerificationInfo.buffer.len > 0
    if verifiable:
      if pbVerificationInfo.getField(1, verifyRoot).isErr:
        return failure("Unable to decode `verifyRoot` from manifest!")

      if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr:
        return failure("Unable to decode `slotRoots` from manifest!")

      if pbVerificationInfo.getField(3, cellSize).isErr:
        return failure("Unable to decode `cellSize` from manifest!")

      if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
        return failure("Unable to decode `verifiableStrategy` from manifest!")

  let
    treeCid = ? Cid.init(treeCidBuf).mapFailure

  let
    self = if protected:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
        blockSize = blockSize.NBytes,
        version = CidVersion(version),
        hcodec = hcodec.MultiCodec,
        codec = codec.MultiCodec,
        ecK = ecK.int,
        ecM = ecM.int,
        originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
        originalDatasetSize = originalDatasetSize.NBytes,
        strategy = StrategyType(protectedStrategy))
    else:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
        blockSize = blockSize.NBytes,
        version = CidVersion(version),
        hcodec = hcodec.MultiCodec,
        codec = codec.MultiCodec)

  ? self.verify()

  if verifiable:
    let
      verifyRootCid = ? Cid.init(verifyRoot).mapFailure
      slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure)

    return Manifest.new(
      manifest = self,
      verifyRoot = verifyRootCid,
      slotRoots = slotRootCids,
      cellSize = cellSize.NBytes,
      strategy = StrategyType(verifiableStrategy)
    )

  self.success

func decode*(_: type Manifest, blk: Block): ?!Manifest =
  ## Decode a manifest from a block
  ##

  if not ? blk.cid.isManifest:
    return failure "Cid not a manifest codec"

  Manifest.decode(blk.data)

@@ -14,209 +14,327 @@ import pkg/upraises
push: {.upraises: [].}

import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]
import pkg/questionable/results

import ../errors
import ../utils
import ../utils/json
import ../units
import ../blocktype
import ../indexingstrategy
import ../logutils

# TODO: Manifest should be reworked to more concrete types,
# perhaps using inheritance
type
  Manifest* = ref object of RootObj
    treeCid {.serialize.}: Cid            # Root of the merkle tree
    datasetSize {.serialize.}: NBytes     # Total size of all blocks
    blockSize {.serialize.}: NBytes       # Size of each contained block (might not be needed if blocks are len-prefixed)
    codec: MultiCodec                     # Dataset codec
    hcodec: MultiCodec                    # Multihash codec
    version: CidVersion                   # Cid version
    case protected {.serialize.}: bool    # Protected datasets have erasure coded info
    of true:
      ecK: int                            # Number of blocks to encode
      ecM: int                            # Number of resulting parity blocks
      originalTreeCid: Cid                # The original root of the dataset being erasure coded
      originalDatasetSize: NBytes
      protectedStrategy: StrategyType     # Indexing strategy used to build the slot roots
      case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
      of true:
        verifyRoot: Cid                   # Root of the top level merkle tree built from slot roots
        slotRoots: seq[Cid]               # Individual slot root built from the original dataset blocks
        cellSize: NBytes                  # Size of each slot cell
        verifiableStrategy: StrategyType  # Indexing strategy used to build the slot roots
      else:
        discard
    else:
      discard

|
||||||
|
# Accessors
|
||||||
|
############################################################
|
||||||
|
|
||||||
|
func blockSize*(self: Manifest): NBytes =
|
||||||
|
self.blockSize
|
||||||
|
|
||||||
|
func datasetSize*(self: Manifest): NBytes =
|
||||||
|
self.datasetSize
|
||||||
|
|
||||||
|
func version*(self: Manifest): CidVersion =
|
||||||
|
self.version
|
||||||
|
|
||||||
|
func hcodec*(self: Manifest): MultiCodec =
|
||||||
|
self.hcodec
|
||||||
|
|
||||||
|
func codec*(self: Manifest): MultiCodec =
|
||||||
|
self.codec
|
||||||
|
|
||||||
|
func protected*(self: Manifest): bool =
|
||||||
|
self.protected
|
||||||
|
|
||||||
|
func ecK*(self: Manifest): int =
|
||||||
|
self.ecK
|
||||||
|
|
||||||
|
func ecM*(self: Manifest): int =
|
||||||
|
self.ecM
|
||||||
|
|
||||||
|
func originalTreeCid*(self: Manifest): Cid =
|
||||||
|
self.originalTreeCid
|
||||||
|
|
||||||
|
func originalBlocksCount*(self: Manifest): int =
|
||||||
|
divUp(self.originalDatasetSize.int, self.blockSize.int)
|
||||||
|
|
||||||
|
func originalDatasetSize*(self: Manifest): NBytes =
|
||||||
|
self.originalDatasetSize
|
||||||
|
|
||||||
|
func treeCid*(self: Manifest): Cid =
|
||||||
|
self.treeCid
|
||||||
|
|
||||||
|
func blocksCount*(self: Manifest): int =
|
||||||
|
divUp(self.datasetSize.int, self.blockSize.int)
|
||||||
|
|
||||||
|
func verifiable*(self: Manifest): bool =
|
||||||
|
bool (self.protected and self.verifiable)
|
||||||
|
|
||||||
|
func verifyRoot*(self: Manifest): Cid =
|
||||||
|
self.verifyRoot
|
||||||
|
|
||||||
|
func slotRoots*(self: Manifest): seq[Cid] =
|
||||||
|
self.slotRoots
|
||||||
|
|
||||||
|
func numSlots*(self: Manifest): int =
|
||||||
|
self.ecK + self.ecM
|
||||||
|
|
||||||
|
func cellSize*(self: Manifest): NBytes =
|
||||||
|
self.cellSize
|
||||||
|
|
||||||
|
func protectedStrategy*(self: Manifest): StrategyType =
|
||||||
|
self.protectedStrategy
|
||||||
|
|
||||||
|
func verifiableStrategy*(self: Manifest): StrategyType =
|
||||||
|
self.verifiableStrategy
|
||||||
|
|
||||||
|
func numSlotBlocks*(self: Manifest): int =
|
||||||
|
divUp(self.blocksCount, self.numSlots)
|
||||||
|
|
||||||
############################################################
|
############################################################
|
||||||
# Operations on block list
|
# Operations on block list
|
||||||
############################################################
|
############################################################
|
||||||
|
|
||||||
func len*(self: Manifest): int =
|
func isManifest*(cid: Cid): ?!bool =
|
||||||
self.blocks.len
|
success (ManifestCodec == ? cid.contentType().mapFailure(CodexError))
|
||||||
|
|
||||||
func `[]`*(self: Manifest, i: Natural): Cid =
|
|
||||||
self.blocks[i]
|
|
||||||
|
|
||||||
func `[]=`*(self: var Manifest, i: Natural, item: Cid) =
|
|
||||||
self.rootHash = Cid.none
|
|
||||||
self.blocks[i] = item
|
|
||||||
|
|
||||||
func `[]`*(self: Manifest, i: BackwardsIndex): Cid =
|
|
||||||
self.blocks[self.len - i.int]
|
|
||||||
|
|
||||||
func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) =
|
|
||||||
self.rootHash = Cid.none
|
|
||||||
self.blocks[self.len - i.int] = item
|
|
||||||
|
|
||||||
proc add*(self: Manifest, cid: Cid) =
|
|
||||||
assert not self.protected # we expect that protected manifests are created with properly-sized self.blocks
|
|
||||||
self.rootHash = Cid.none
|
|
||||||
trace "Adding cid to manifest", cid
|
|
||||||
self.blocks.add(cid)
|
|
||||||
self.originalBytes = self.blocks.len * self.blockSize
|
|
||||||
|
|
||||||
iterator items*(self: Manifest): Cid =
|
|
||||||
for b in self.blocks:
|
|
||||||
yield b
|
|
||||||
|
|
||||||
iterator pairs*(self: Manifest): tuple[key: int, val: Cid] =
|
|
||||||
for pair in self.blocks.pairs():
|
|
||||||
yield pair
|
|
||||||
|
|
||||||
func contains*(self: Manifest, cid: Cid): bool =
|
|
||||||
cid in self.blocks
|
|
||||||
|
|
||||||
|
func isManifest*(mc: MultiCodec): ?!bool =
|
||||||
|
success mc == ManifestCodec
|
||||||
|
|
||||||
############################################################
|
############################################################
|
||||||
# Various sizes and verification
|
# Various sizes and verification
|
||||||
############################################################
|
############################################################
|
||||||
|
|
||||||
func bytes*(self: Manifest, pad = true): int =
|
|
||||||
## Compute how many bytes corresponding StoreStream(Manifest, pad) will return
|
|
||||||
if pad or self.protected:
|
|
||||||
self.len * self.blockSize
|
|
||||||
else:
|
|
||||||
self.originalBytes
|
|
||||||
|
|
||||||
func rounded*(self: Manifest): int =
|
func rounded*(self: Manifest): int =
|
||||||
## Number of data blocks in *protected* manifest including padding at the end
|
## Number of data blocks in *protected* manifest including padding at the end
|
||||||
roundUp(self.originalLen, self.K)
|
roundUp(self.originalBlocksCount, self.ecK)
|
||||||
|
|
||||||
func steps*(self: Manifest): int =
|
func steps*(self: Manifest): int =
|
||||||
## Number of EC groups in *protected* manifest
|
## Number of EC groups in *protected* manifest
|
||||||
divUp(self.originalLen, self.K)
|
divUp(self.rounded, self.ecK)
|
||||||
|
|
||||||
func verify*(self: Manifest): ?!void =
|
func verify*(self: Manifest): ?!void =
|
||||||
## Check manifest correctness
|
## Check manifest correctness
|
||||||
##
|
##
|
||||||
let originalLen = (if self.protected: self.originalLen else: self.len)
|
|
||||||
|
|
||||||
if divUp(self.originalBytes, self.blockSize) != originalLen:
|
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
|
||||||
return failure newException(CodexError, "Broken manifest: wrong originalBytes")
|
return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
|
||||||
|
|
||||||
if self.protected and (self.len != self.steps * (self.K + self.M)):
|
|
||||||
return failure newException(CodexError, "Broken manifest: wrong originalLen")
|
|
||||||
|
|
||||||
return success()
|
return success()
|
||||||
|
|
||||||
|
func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
|
||||||
|
self.treeCid.success
|
||||||
|
|
||||||
############################################################
|
func `==`*(a, b: Manifest): bool =
|
||||||
# Cid computation
|
(a.treeCid == b.treeCid) and
|
||||||
############################################################
|
(a.datasetSize == b.datasetSize) and
|
||||||
|
(a.blockSize == b.blockSize) and
|
||||||
template hashBytes(mh: MultiHash): seq[byte] =
|
(a.version == b.version) and
|
||||||
## get the hash bytes of a multihash object
|
(a.hcodec == b.hcodec) and
|
||||||
##
|
(a.codec == b.codec) and
|
||||||
|
(a.protected == b.protected) and
|
||||||
mh.data.buffer[mh.dpos..(mh.dpos + mh.size - 1)]
|
(if a.protected:
|
||||||
|
(a.ecK == b.ecK) and
|
||||||
proc makeRoot*(self: Manifest): ?!void =
|
(a.ecM == b.ecM) and
|
||||||
## Create a tree hash root of the contained
|
(a.originalTreeCid == b.originalTreeCid) and
|
||||||
## block hashes
|
(a.originalDatasetSize == b.originalDatasetSize) and
|
||||||
##
|
(a.protectedStrategy == b.protectedStrategy) and
|
||||||
|
(a.verifiable == b.verifiable) and
|
||||||
var
|
(if a.verifiable:
|
||||||
stack: seq[MultiHash]
|
(a.verifyRoot == b.verifyRoot) and
|
||||||
|
(a.slotRoots == b.slotRoots) and
|
||||||
for cid in self:
|
(a.cellSize == b.cellSize) and
|
||||||
stack.add(? cid.mhash.mapFailure)
|
(a.verifiableStrategy == b.verifiableStrategy)
|
||||||
|
else:
|
||||||
while stack.len > 1:
|
true)
|
||||||
let
|
else:
|
||||||
(b1, b2) = (stack.pop(), stack.pop())
|
true)
|
||||||
mh = ? MultiHash.digest(
|
|
||||||
$self.hcodec,
|
|
||||||
(b1.hashBytes() & b2.hashBytes()))
|
|
||||||
.mapFailure
|
|
||||||
stack.add(mh)
|
|
||||||
|
|
||||||
if stack.len == 1:
|
|
||||||
let cid = ? Cid.init(
|
|
||||||
self.version,
|
|
||||||
self.codec,
|
|
||||||
(? EmptyDigests[self.version][self.hcodec].catch))
|
|
||||||
.mapFailure
|
|
||||||
|
|
||||||
self.rootHash = cid.some
|
|
||||||
|
|
||||||
success()
|
|
||||||
|
|
||||||
proc cid*(self: Manifest): ?!Cid =
|
|
||||||
## Generate a root hash using the treehash algorithm
|
|
||||||
##
|
|
||||||
|
|
||||||
if self.rootHash.isNone:
|
|
||||||
? self.makeRoot()
|
|
||||||
|
|
||||||
(!self.rootHash).success
|
|
||||||
|
|
||||||
|
func `$`*(self: Manifest): string =
|
||||||
|
"treeCid: " & $self.treeCid &
|
||||||
|
", datasetSize: " & $self.datasetSize &
|
||||||
|
", blockSize: " & $self.blockSize &
|
||||||
|
", version: " & $self.version &
|
||||||
|
", hcodec: " & $self.hcodec &
|
||||||
|
", codec: " & $self.codec &
|
||||||
|
", protected: " & $self.protected &
|
||||||
|
(if self.protected:
|
||||||
|
", ecK: " & $self.ecK &
|
||||||
|
", ecM: " & $self.ecM &
|
||||||
|
", originalTreeCid: " & $self.originalTreeCid &
|
||||||
|
", originalDatasetSize: " & $self.originalDatasetSize &
|
||||||
|
", verifiable: " & $self.verifiable &
|
||||||
|
(if self.verifiable:
|
||||||
|
", verifyRoot: " & $self.verifyRoot &
|
||||||
|
", slotRoots: " & $self.slotRoots
|
||||||
|
else:
|
||||||
|
"")
|
||||||
|
else:
|
||||||
|
"")
|
||||||
|
|
||||||
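How `rounded`, `steps`, and `verify` fit together, with illustrative numbers (`roundUp`/`divUp` from ../utils):

```nim
# Illustrative only: 10 original blocks with ecK = 4, ecM = 2.
assert roundUp(10, 4) == 12 # rounded: padded to a multiple of K
assert divUp(12, 4) == 3    # steps: number of EC groups
# A protected manifest must then hold 3 * (4 + 2) = 18 blocks,
# which is exactly what `verify` checks.
```
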
 ############################################################
 # Constructors
 ############################################################

-proc new*(
+func new*(
   T: type Manifest,
-  blocks: openArray[Cid] = [],
-  protected = false,
-  version = CIDv1,
-  hcodec = multiCodec("sha2-256"),
-  codec = multiCodec("raw"),
-  blockSize = BlockSize): ?!T =
-  ## Create a manifest using array of `Cid`s
-  ##
-
-  if hcodec notin EmptyDigests[version]:
-    return failure("Unsupported manifest hash codec!")
+  treeCid: Cid,
+  blockSize: NBytes,
+  datasetSize: NBytes,
+  version: CidVersion = CIDv1,
+  hcodec = Sha256HashCodec,
+  codec = BlockCodec,
+  protected = false): Manifest =

   T(
-    blocks: @blocks,
+    treeCid: treeCid,
+    blockSize: blockSize,
+    datasetSize: datasetSize,
     version: version,
     codec: codec,
     hcodec: hcodec,
-    blockSize: blockSize,
-    originalBytes: blocks.len * blockSize,
-    protected: protected).success
+    protected: protected)

-proc new*(
+func new*(
   T: type Manifest,
   manifest: Manifest,
-  K, M: int): ?!Manifest =
+  treeCid: Cid,
+  datasetSize: NBytes,
+  ecK, ecM: int,
+  strategy = SteppedStrategy): Manifest =
   ## Create an erasure protected dataset from an
-  ## un-protected one
+  ## unprotected one
   ##

-  var
-    self = Manifest(
-      version: manifest.version,
-      codec: manifest.codec,
-      hcodec: manifest.hcodec,
-      originalBytes: manifest.originalBytes,
-      blockSize: manifest.blockSize,
-      protected: true,
-      K: K, M: M,
-      originalCid: ? manifest.cid,
-      originalLen: manifest.len)
-
-  let
-    encodedLen = self.rounded + (self.steps * M)
-
-  self.blocks = newSeq[Cid](encodedLen)
-
-  # copy original manifest blocks
-  for i in 0..<self.rounded:
-    if i < manifest.len:
-      self.blocks[i] = manifest[i]
-    else:
-      self.blocks[i] = EmptyCid[manifest.version]
-      .catch
-      .get()[manifest.hcodec]
-      .catch
-      .get()
-
-  ? self.verify()
-  self.success
+  Manifest(
+    treeCid: treeCid,
+    datasetSize: datasetSize,
+    version: manifest.version,
+    codec: manifest.codec,
+    hcodec: manifest.hcodec,
+    blockSize: manifest.blockSize,
+    protected: true,
+    ecK: ecK, ecM: ecM,
+    originalTreeCid: manifest.treeCid,
+    originalDatasetSize: manifest.datasetSize,
+    protectedStrategy: strategy)

-proc new*(
+func new*(
   T: type Manifest,
-  data: openArray[byte],
-  decoder = ManifestContainers[$DagPBCodec]): ?!T =
-  Manifest.decode(data, decoder)
+  manifest: Manifest): Manifest =
+  ## Create an unprotected dataset from an
+  ## erasure protected one
+  ##
+
+  Manifest(
+    treeCid: manifest.originalTreeCid,
+    datasetSize: manifest.originalDatasetSize,
+    version: manifest.version,
+    codec: manifest.codec,
+    hcodec: manifest.hcodec,
+    blockSize: manifest.blockSize,
+    protected: false)
+
+func new*(
+  T: type Manifest,
+  treeCid: Cid,
+  datasetSize: NBytes,
+  blockSize: NBytes,
+  version: CidVersion,
+  hcodec: MultiCodec,
+  codec: MultiCodec,
+  ecK: int,
+  ecM: int,
+  originalTreeCid: Cid,
+  originalDatasetSize: NBytes,
+  strategy = SteppedStrategy): Manifest =
+
+  Manifest(
+    treeCid: treeCid,
+    datasetSize: datasetSize,
+    blockSize: blockSize,
+    version: version,
+    hcodec: hcodec,
+    codec: codec,
+    protected: true,
+    ecK: ecK,
+    ecM: ecM,
+    originalTreeCid: originalTreeCid,
+    originalDatasetSize: originalDatasetSize,
+    protectedStrategy: strategy)
+
+func new*(
+  T: type Manifest,
+  manifest: Manifest,
+  verifyRoot: Cid,
+  slotRoots: openArray[Cid],
+  cellSize = DefaultCellSize,
+  strategy = LinearStrategy): ?!Manifest =
+  ## Create a verifiable dataset from a
+  ## protected one
+  ##
+
+  if not manifest.protected:
+    return failure newException(
+      CodexError, "Can create verifiable manifest only from protected manifest.")
+
+  if slotRoots.len != manifest.numSlots:
+    return failure newException(
+      CodexError, "Wrong number of slot roots.")
+
+  success Manifest(
+    treeCid: manifest.treeCid,
+    datasetSize: manifest.datasetSize,
+    version: manifest.version,
+    codec: manifest.codec,
+    hcodec: manifest.hcodec,
+    blockSize: manifest.blockSize,
+    protected: true,
+    ecK: manifest.ecK,
+    ecM: manifest.ecM,
+    originalTreeCid: manifest.originalTreeCid,
+    originalDatasetSize: manifest.originalDatasetSize,
+    protectedStrategy: manifest.protectedStrategy,
+    verifiable: true,
+    verifyRoot: verifyRoot,
+    slotRoots: @slotRoots,
+    cellSize: cellSize,
+    verifiableStrategy: strategy)
+
+func new*(
+  T: type Manifest,
+  data: openArray[byte]): ?!Manifest =
+  ## Create a manifest instance from given data
+  ##
+
+  Manifest.decode(data)
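To make the constructor chain above concrete, here is a minimal usage sketch in Nim; the CID literal and sizes are placeholders for illustration, not values from this change:

```nim
import pkg/libp2p # for Cid

# Hypothetical dataset tree CID and sizes.
let treeCid = Cid.init("zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA").tryGet()

let unprotected = Manifest.new(
  treeCid = treeCid,
  blockSize = NBytes(65536),
  datasetSize = NBytes(1048576))

# Derive an erasure-protected manifest from the unprotected one; in real
# use, treeCid/datasetSize would describe the re-encoded dataset.
let protected = Manifest.new(
  manifest = unprotected,
  treeCid = treeCid,
  datasetSize = unprotected.datasetSize,
  ecK = 4, ecM = 2)
```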
@@ -1,44 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

# This module defines Manifest and all related types

import std/tables
import pkg/libp2p
import pkg/questionable

const
  DagPBCodec* = multiCodec("dag-pb")

type
  ManifestCoderType*[codec: static MultiCodec] = object
  DagPBCoder* = ManifestCoderType[multiCodec("dag-pb")]

const
  ManifestContainers* = {
    $DagPBCodec: DagPBCoder()
  }.toTable

type
  Manifest* = ref object of RootObj
    rootHash*: ?Cid       # Root (tree) hash of the contained data set
    originalBytes*: int   # Exact size of the original (uploaded) file
    blockSize*: int       # Size of each contained block (might not be needed if blocks are len-prefixed)
    blocks*: seq[Cid]     # Block Cid
    version*: CidVersion  # Cid version
    hcodec*: MultiCodec   # Multihash codec
    codec*: MultiCodec    # Data set codec
    case protected*: bool # Protected datasets have erasure coded info
    of true:
      K*: int             # Number of blocks to encode
      M*: int             # Number of resulting parity blocks
      originalCid*: Cid   # The original Cid of the dataset being erasure coded
      originalLen*: int   # The length of the original manifest
    else:
      discard
codex/market.nim (124 changes)

@@ -1,26 +1,62 @@
 import pkg/chronos
 import pkg/upraises
 import pkg/questionable
+import pkg/ethers/erc20
 import ./contracts/requests
+import ./contracts/proofs
 import ./clock
+import ./errors
+import ./periods

 export chronos
 export questionable
 export requests
+export proofs
 export SecondsSince1970
+export periods

 type
   Market* = ref object of RootObj
+  MarketError* = object of CodexError
   Subscription* = ref object of RootObj
-  OnRequest* = proc(id: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].}
+  OnRequest* = proc(id: RequestId,
+                    ask: StorageAsk,
+                    expiry: UInt256) {.gcsafe, upraises:[].}
   OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
   OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
+  OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
   OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
   OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
+  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].}
+  PastStorageRequest* = object
+    requestId*: RequestId
+    ask*: StorageAsk
+    expiry*: UInt256
+  ProofChallenge* = array[32, byte]
+
+method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
+  raiseAssert("not implemented")

 method getSigner*(market: Market): Future[Address] {.base, async.} =
   raiseAssert("not implemented")

+method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
+  raiseAssert("not implemented")
+
+method proofTimeout*(market: Market): Future[UInt256] {.base, async.} =
+  raiseAssert("not implemented")
+
+method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
+  raiseAssert("not implemented")
+
+method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
+  raiseAssert("not implemented")
+
+proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
+  let downtime = await market.proofDowntime
+  let pntr = await market.getPointer(slotId)
+  return pntr < downtime
+
 method requestStorage*(market: Market,
                        request: StorageRequest) {.base, async.} =
   raiseAssert("not implemented")

@@ -28,28 +64,49 @@ method requestStorage*(market: Market,
 method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
   raiseAssert("not implemented")

+method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
+  raiseAssert("not implemented")
+
 method getRequest*(market: Market,
                    id: RequestId):
                   Future[?StorageRequest] {.base, async.} =
   raiseAssert("not implemented")

-method getState*(market: Market,
-                 requestId: RequestId): Future[?RequestState] {.base, async.} =
+method requestState*(market: Market,
+                     requestId: RequestId): Future[?RequestState] {.base, async.} =
   raiseAssert("not implemented")

+method slotState*(market: Market,
+                  slotId: SlotId): Future[SlotState] {.base, async.} =
+  raiseAssert("not implemented")
+
 method getRequestEnd*(market: Market,
                       id: RequestId): Future[SecondsSince1970] {.base, async.} =
   raiseAssert("not implemented")

+method requestExpiresAt*(market: Market,
+                         id: RequestId): Future[SecondsSince1970] {.base, async.} =
+  raiseAssert("not implemented")
+
 method getHost*(market: Market,
                 requestId: RequestId,
                 slotIndex: UInt256): Future[?Address] {.base, async.} =
   raiseAssert("not implemented")

+method getActiveSlot*(
+  market: Market,
+  slotId: SlotId): Future[?Slot] {.base, async.} =
+
+  raiseAssert("not implemented")
+
 method fillSlot*(market: Market,
                  requestId: RequestId,
                  slotIndex: UInt256,
-                 proof: seq[byte]) {.base, async.} =
+                 proof: Groth16Proof,
+                 collateral: UInt256) {.base, async.} =
+  raiseAssert("not implemented")
+
+method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
   raiseAssert("not implemented")

 method withdrawFunds*(market: Market,

@@ -61,12 +118,48 @@ method subscribeRequests*(market: Market,
                          Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method isProofRequired*(market: Market,
+                        id: SlotId): Future[bool] {.base, async.} =
+  raiseAssert("not implemented")
+
+method willProofBeRequired*(market: Market,
+                            id: SlotId): Future[bool] {.base, async.} =
+  raiseAssert("not implemented")
+
+method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} =
+  raiseAssert("not implemented")
+
+method submitProof*(market: Market,
+                    id: SlotId,
+                    proof: Groth16Proof) {.base, async.} =
+  raiseAssert("not implemented")
+
+method markProofAsMissing*(market: Market,
+                           id: SlotId,
+                           period: Period) {.base, async.} =
+  raiseAssert("not implemented")
+
+method canProofBeMarkedAsMissing*(market: Market,
+                                  id: SlotId,
+                                  period: Period): Future[bool] {.base, async.} =
+  raiseAssert("not implemented")
+
+method subscribeFulfillment*(market: Market,
+                             callback: OnFulfillment):
+                            Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeFulfillment*(market: Market,
                              requestId: RequestId,
                              callback: OnFulfillment):
                             Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeSlotFilled*(market: Market,
+                            callback: OnSlotFilled):
+                           Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeSlotFilled*(market: Market,
                             requestId: RequestId,
                             slotIndex: UInt256,

@@ -74,17 +167,42 @@ method subscribeSlotFilled*(market: Market,
                            Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeSlotFreed*(market: Market,
+                           callback: OnSlotFreed):
+                          Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
+method subscribeRequestCancelled*(market: Market,
+                                  callback: OnRequestCancelled):
+                                 Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeRequestCancelled*(market: Market,
                                   requestId: RequestId,
                                   callback: OnRequestCancelled):
                                  Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeRequestFailed*(market: Market,
+                               callback: OnRequestFailed):
+                              Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeRequestFailed*(market: Market,
                                requestId: RequestId,
                                callback: OnRequestFailed):
                               Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeProofSubmission*(market: Market,
+                                 callback: OnProofSubmitted):
+                                Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
   raiseAssert("not implemented")
+
+method queryPastStorageRequests*(market: Market,
+                                 blocksAgo: int):
+                                Future[seq[PastStorageRequest]] {.base, async.} =
+  raiseAssert("not implemented")
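Since every `{.base.}` method above simply raises, a concrete market (or a test double) only overrides what it supports. A minimal sketch, assuming this module's exports; `EchoMarket` is hypothetical:

```nim
# Hypothetical test double: only the methods a test exercises need
# overriding, everything else keeps the base raiseAssert behaviour.
type EchoMarket = ref object of Market
  signer: Address

method getSigner*(market: EchoMarket): Future[Address] {.async.} =
  return market.signer

method mySlots*(market: EchoMarket): Future[seq[SlotId]] {.async.} =
  return @[] # this stub hosts no slots
```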
@@ -0,0 +1,10 @@
import ./merkletree/merkletree
import ./merkletree/codex
import ./merkletree/poseidon2

export codex, poseidon2, merkletree

type
  SomeMerkleTree* = ByteTree | CodexTree | Poseidon2Tree
  SomeMerkleProof* = ByteProof | CodexProof | Poseidon2Proof
  SomeMerkleHash* = ByteHash | Poseidon2Hash
@@ -0,0 +1,4 @@
import ./codex/codex
import ./codex/coders

export codex, coders
@@ -0,0 +1,119 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises

push: {.upraises: [].}

import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/serde/json

import ../../units
import ../../errors

import ./codex

const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint

proc encode*(self: CodexTree): seq[byte] =
  var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
  pb.write(1, self.mcodec.uint64)
  pb.write(2, self.leavesCount.uint64)
  for node in self.nodes:
    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
    nodesPb.write(1, node)
    nodesPb.finish()
    pb.write(3, nodesPb)

  pb.finish
  pb.buffer

proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
  var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
  var mcodecCode: uint64
  var leavesCount: uint64
  discard ? pb.getField(1, mcodecCode).mapFailure
  discard ? pb.getField(2, leavesCount).mapFailure

  let mcodec = MultiCodec.codec(mcodecCode.int)
  if mcodec == InvalidMultiCodec:
    return failure("Invalid MultiCodec code " & $mcodecCode)

  var
    nodesBuff: seq[seq[byte]]
    nodes: seq[ByteHash]

  if ? pb.getRepeatedField(3, nodesBuff).mapFailure:
    for nodeBuff in nodesBuff:
      var node: ByteHash
      discard ? initProtoBuffer(nodeBuff).getField(1, node).mapFailure
      nodes.add node

  CodexTree.fromNodes(mcodec, nodes, leavesCount.int)

proc encode*(self: CodexProof): seq[byte] =
  var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
  pb.write(1, self.mcodec.uint64)
  pb.write(2, self.index.uint64)
  pb.write(3, self.nleaves.uint64)

  for node in self.path:
    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
    nodesPb.write(1, node)
    nodesPb.finish()
    pb.write(4, nodesPb)

  pb.finish
  pb.buffer

proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
  var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
  var mcodecCode: uint64
  var index: uint64
  var nleaves: uint64
  discard ? pb.getField(1, mcodecCode).mapFailure

  let mcodec = MultiCodec.codec(mcodecCode.int)
  if mcodec == InvalidMultiCodec:
    return failure("Invalid MultiCodec code " & $mcodecCode)

  discard ? pb.getField(2, index).mapFailure
  discard ? pb.getField(3, nleaves).mapFailure

  var
    nodesBuff: seq[seq[byte]]
    nodes: seq[ByteHash]

  if ? pb.getRepeatedField(4, nodesBuff).mapFailure:
    for nodeBuff in nodesBuff:
      var node: ByteHash
      let nodePb = initProtoBuffer(nodeBuff)
      discard ? nodePb.getField(1, node).mapFailure
      nodes.add node

  CodexProof.init(mcodec, index.int, nleaves.int, nodes)

proc fromJson*(
  _: type CodexProof,
  json: JsonNode
): ?!CodexProof =
  expectJsonKind(CodexProof, JString, json)
  var bytes: seq[byte]
  try:
    bytes = hexToSeqByte(json.str)
  except ValueError as err:
    return failure(err)

  CodexProof.decode(bytes)

func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode())
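The two coders are symmetric, so an encoded proof should decode back to an equal value. A small round-trip sketch, using placeholder 32-byte path nodes:

```nim
# Build a proof with placeholder path nodes, then round-trip it.
let someProof = CodexProof.init(
  Sha256HashCodec,
  index = 1,
  nleaves = 4,
  nodes = @[newSeq[byte](32), newSeq[byte](32)]).tryGet()

let bytes = someProof.encode()
without decoded =? CodexProof.decode(bytes), err:
  raise err

assert decoded.index == someProof.index
assert decoded.path == someProof.path
```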
@@ -0,0 +1,255 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/bitops
import std/sequtils

import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]

import ../../utils
import ../../rng
import ../../errors
import ../../blocktype

from ../../utils/digest import digestBytes

import ../merkletree

export merkletree

logScope:
  topics = "codex merkletree"

type
  ByteTreeKey* {.pure.} = enum
    KeyNone              = 0x0.byte
    KeyBottomLayer       = 0x1.byte
    KeyOdd               = 0x2.byte
    KeyOddAndBottomLayer = 0x3.byte

  ByteHash* = seq[byte]
  ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
  ByteProof* = MerkleProof[ByteHash, ByteTreeKey]

  CodexTree* = ref object of ByteTree
    mcodec*: MultiCodec

  CodexProof* = ref object of ByteProof
    mcodec*: MultiCodec

func mhash*(mcodec: MultiCodec): ?!MHash =
  let
    mhash = CodeHashes.getOrDefault(mcodec)

  if isNil(mhash.coder):
    return failure "Invalid multihash codec"

  success mhash

func digestSize*(self: (CodexTree or CodexProof)): int =
  ## Size of the hash digest in bytes
  ##

  self.mhash.size

func getProof*(self: CodexTree, index: int): ?!CodexProof =
  var
    proof = CodexProof(mcodec: self.mcodec)

  ? self.getProof(index, proof)

  success proof

func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
  ## Verify hash
  ##

  let
    rootBytes = root.digestBytes
    leafBytes = leaf.digestBytes

  if self.mcodec != root.mcodec or
     self.mcodec != leaf.mcodec:
    return failure "Hash codec mismatch"

  if rootBytes.len != root.size and
     leafBytes.len != leaf.size:
    return failure "Invalid hash length"

  self.verify(leafBytes, rootBytes)

func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
  self.verify(? leaf.mhash.mapFailure, ? root.mhash.mapFailure)

proc rootCid*(
  self: CodexTree,
  version = CIDv1,
  dataCodec = DatasetRootCodec): ?!Cid =

  if (? self.root).len == 0:
    return failure "Empty root"

  let
    mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure

  Cid.init(version, dataCodec, mhash).mapFailure

func getLeafCid*(
  self: CodexTree,
  i: Natural,
  version = CIDv1,
  dataCodec = BlockCodec): ?!Cid =

  if i >= self.leavesCount:
    return failure "Invalid leaf index " & $i

  let
    leaf = self.leaves[i]
    mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure

  Cid.init(version, dataCodec, mhash).mapFailure

proc `$`*(self: CodexTree): string =
  let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
  "CodexTree(" &
    " root: " & root &
    ", leavesCount: " & $self.leavesCount &
    ", levels: " & $self.levels &
    ", mcodec: " & $self.mcodec & " )"

proc `$`*(self: CodexProof): string =
  "CodexProof(" &
    " nleaves: " & $self.nleaves &
    ", index: " & $self.index &
    ", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
    ", mcodec: " & $self.mcodec & " )"

func compress*(
  x, y: openArray[byte],
  key: ByteTreeKey,
  mhash: MHash): ?!ByteHash =
  ## Compress two hashes
  ##

  var digest = newSeq[byte](mhash.size)
  mhash.coder(@x & @y & @[ key.byte ], digest)
  success digest

func init*(
  _: type CodexTree,
  mcodec: MultiCodec = Sha256HashCodec,
  leaves: openArray[ByteHash]): ?!CodexTree =

  if leaves.len == 0:
    return failure "Empty leaves"

  let
    mhash = ? mcodec.mhash()
    compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
      compress(x, y, key, mhash)
    Zero: ByteHash = newSeq[byte](mhash.size)

  if mhash.size != leaves[0].len:
    return failure "Invalid hash length"

  var
    self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)

  self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
  success self

func init*(
  _: type CodexTree,
  leaves: openArray[MultiHash]): ?!CodexTree =

  if leaves.len == 0:
    return failure "Empty leaves"

  let
    mcodec = leaves[0].mcodec
    leaves = leaves.mapIt( it.digestBytes )

  CodexTree.init(mcodec, leaves)

func init*(
  _: type CodexTree,
  leaves: openArray[Cid]): ?!CodexTree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let
    mcodec = (? leaves[0].mhash.mapFailure).mcodec
    leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes )

  CodexTree.init(mcodec, leaves)

proc fromNodes*(
  _: type CodexTree,
  mcodec: MultiCodec = Sha256HashCodec,
  nodes: openArray[ByteHash],
  nleaves: int): ?!CodexTree =

  if nodes.len == 0:
    return failure "Empty nodes"

  let
    mhash = ? mcodec.mhash()
    Zero = newSeq[byte](mhash.size)
    compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
      compress(x, y, key, mhash)

  if mhash.size != nodes[0].len:
    return failure "Invalid hash length"

  var
    self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec)
    layer = nleaves
    pos = 0

  while pos < nodes.len:
    self.layers.add( nodes[pos..<(pos + layer)] )
    pos += layer
    layer = divUp(layer, 2)

  let
    index = Rng.instance.rand(nleaves - 1)
    proof = ? self.getProof(index)

  if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
    return failure "Unable to verify tree built from nodes"

  success self

func init*(
  _: type CodexProof,
  mcodec: MultiCodec = Sha256HashCodec,
  index: int,
  nleaves: int,
  nodes: openArray[ByteHash]): ?!CodexProof =

  if nodes.len == 0:
    return failure "Empty nodes"

  let
    mhash = ? mcodec.mhash()
    Zero = newSeq[byte](mhash.size)
    compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
      compress(x, y, key, mhash)

  success CodexProof(
    compress: compressor,
    zero: Zero,
    mcodec: mcodec,
    index: index,
    nleaves: nleaves,
    path: @nodes)
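A small sketch of building a `CodexTree` and checking a proof, using placeholder all-zero 32-byte digests in place of real block hashes:

```nim
let
  leaves = @[
    newSeq[byte](32), # stand-ins for real sha2-256 block digests
    newSeq[byte](32),
    newSeq[byte](32)]
  tree = CodexTree.init(Sha256HashCodec, leaves).tryGet()
  proof = tree.getProof(1).tryGet()

# A proof for leaf 1 must reconstruct the tree's root.
assert proof.verify(leaves[1], tree.root.tryGet()).tryGet()
```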
@@ -0,0 +1,153 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/bitops

import pkg/questionable/results

import ../errors

type
  CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].}

  MerkleTree*[H, K] = ref object of RootObj
    layers*  : seq[seq[H]]
    compress*: CompressFn[H, K]
    zero*    : H

  MerkleProof*[H, K] = ref object of RootObj
    index*   : int              # linear index of the leaf, starting from 0
    path*    : seq[H]           # order: from the bottom to the top
    nleaves* : int              # number of leaves in the tree (=size of input)
    compress*: CompressFn[H, K] # compress function
    zero*    : H                # zero value

func depth*[H, K](self: MerkleTree[H, K]): int =
  return self.layers.len - 1

func leavesCount*[H, K](self: MerkleTree[H, K]): int =
  return self.layers[0].len

func levels*[H, K](self: MerkleTree[H, K]): int =
  return self.layers.len

func leaves*[H, K](self: MerkleTree[H, K]): seq[H] =
  return self.layers[0]

iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] =
  for layer in self.layers:
    yield layer

iterator nodes*[H, K](self: MerkleTree[H, K]): H =
  for layer in self.layers:
    for node in layer:
      yield node

func root*[H, K](self: MerkleTree[H, K]): ?!H =
  let last = self.layers[^1]
  if last.len != 1:
    return failure "invalid tree"

  return success last[0]

func getProof*[H, K](
  self: MerkleTree[H, K],
  index: int,
  proof: MerkleProof[H, K]): ?!void =
  let depth = self.depth
  let nleaves = self.leavesCount

  if not (index >= 0 and index < nleaves):
    return failure "index out of bounds"

  var path : seq[H] = newSeq[H](depth)
  var k = index
  var m = nleaves
  for i in 0..<depth:
    let j = k xor 1
    path[i] = if (j < m): self.layers[i][j] else: self.zero
    k = k shr 1
    m = (m + 1) shr 1

  proof.index = index
  proof.path = path
  proof.nleaves = nleaves
  proof.compress = self.compress

  success()

func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
  var
    proof = MerkleProof[H, K]()

  ? self.getProof(index, proof)

  success proof

func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
  var
    m = proof.nleaves
    j = proof.index
    h = leaf
    bottomFlag = K.KeyBottomLayer

  for p in proof.path:
    let oddIndex : bool = (bitand(j, 1) != 0)
    if oddIndex:
      # the index of the child is odd, so the node itself can't be odd
      # (a bit counterintuitive, yeah :)
      h = ? proof.compress( p, h, bottomFlag )
    else:
      if j == m - 1:
        # single child => odd node
        h = ? proof.compress( h, p, K(bottomFlag.ord + 2) )
      else:
        # even node
        h = ? proof.compress( h, p, bottomFlag )
    bottomFlag = K.KeyNone
    j = j shr 1
    m = (m + 1) shr 1

  return success h

func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
  success bool(root == ? proof.reconstructRoot(leaf))

func merkleTreeWorker*[H, K](
  self: MerkleTree[H, K],
  xs: openArray[H],
  isBottomLayer: static bool): ?!seq[seq[H]] =

  let a = low(xs)
  let b = high(xs)
  let m = b - a + 1

  when not isBottomLayer:
    if m == 1:
      return success @[ @xs ]

  let halfn: int = m div 2
  let n    : int = 2 * halfn
  let isOdd: bool = (n != m)

  var ys: seq[H]
  if not isOdd:
    ys = newSeq[H](halfn)
  else:
    ys = newSeq[H](halfn + 1)

  for i in 0..<halfn:
    const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
    ys[i] = ? self.compress( xs[a + 2 * i], xs[a + 2 * i + 1], key = key )
  if isOdd:
    const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
    ys[halfn] = ? self.compress( xs[n], self.zero, key = key )

  success @[ @xs ] & ? self.merkleTreeWorker(ys, isBottomLayer = false)
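The sibling selection in `getProof` walks up the tree with `j = k xor 1`. A tiny self-contained check of that indexing for a 4-leaf tree; `siblingIndices` is illustrative, not part of the module:

```nim
# Which layer positions end up in a proof path, mirroring the
# `j = k xor 1` walk in getProof.
func siblingIndices(index, depth: int): seq[int] =
  var k = index
  for _ in 0 ..< depth:
    result.add(k xor 1) # sibling position within the current layer
    k = k shr 1         # parent position in the next layer up

# In a 4-leaf tree (depth 2), the proof for leaf 2 takes
# layers[0][3] and layers[1][0]:
assert siblingIndices(2, 2) == @[3, 0]
```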
@@ -0,0 +1,148 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sequtils

import pkg/poseidon2
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results

import ../utils
import ../rng

import ./merkletree

export merkletree, poseidon2

const
  KeyNoneF              = F.fromhex("0x0")
  KeyBottomLayerF       = F.fromhex("0x1")
  KeyOddF               = F.fromhex("0x2")
  KeyOddAndBottomLayerF = F.fromhex("0x3")

  Poseidon2Zero* = zero

type
  Bn254Fr* = F
  Poseidon2Hash* = Bn254Fr

  PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
    KeyNone
    KeyBottomLayer
    KeyOdd
    KeyOddAndBottomLayer

  Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
  Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]

proc `$`*(self: Poseidon2Tree): string =
  let root = if self.root.isOk: self.root.get.toHex else: "none"
  "Poseidon2Tree(" &
    " root: " & root &
    ", leavesCount: " & $self.leavesCount &
    ", levels: " & $self.levels & " )"

proc `$`*(self: Poseidon2Proof): string =
  "Poseidon2Proof(" &
    " nleaves: " & $self.nleaves &
    ", index: " & $self.index &
    ", path: " & $self.path.mapIt( it.toHex ) & " )"

func toArray32*(bytes: openArray[byte]): array[32, byte] =
  result[0..<bytes.len] = bytes[0..<bytes.len]

converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
  case key:
    of KeyNone: KeyNoneF
    of KeyBottomLayer: KeyBottomLayerF
    of KeyOdd: KeyOddF
    of KeyOddAndBottomLayer: KeyOddAndBottomLayerF

func init*(
  _: type Poseidon2Tree,
  leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =

  if leaves.len == 0:
    return failure "Empty leaves"

  let
    compressor = proc(
      x, y: Poseidon2Hash,
      key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
      success compress( x, y, key.toKey )

  var
    self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)

  self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
  success self

func init*(
  _: type Poseidon2Tree,
  leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
  Poseidon2Tree.init(
    leaves.mapIt( Poseidon2Hash.fromBytes(it) ))

proc fromNodes*(
  _: type Poseidon2Tree,
  nodes: openArray[Poseidon2Hash],
  nleaves: int): ?!Poseidon2Tree =

  if nodes.len == 0:
    return failure "Empty nodes"

  let
    compressor = proc(
      x, y: Poseidon2Hash,
      key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
      success compress( x, y, key.toKey )

  var
    self = Poseidon2Tree(compress: compressor, zero: zero)
    layer = nleaves
    pos = 0

  while pos < nodes.len:
    self.layers.add( nodes[pos..<(pos + layer)] )
    pos += layer
    layer = divUp(layer, 2)

  let
    index = Rng.instance.rand(nleaves - 1)
    proof = ? self.getProof(index)

  if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
    return failure "Unable to verify tree built from nodes"

  success self

func init*(
  _: type Poseidon2Proof,
  index: int,
  nleaves: int,
  nodes: openArray[Poseidon2Hash]): ?!Poseidon2Proof =

  if nodes.len == 0:
    return failure "Empty nodes"

  let
    compressor = proc(
      x, y: Poseidon2Hash,
      key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
      success compress( x, y, key.toKey )

  success Poseidon2Proof(
    compress: compressor,
    zero: Poseidon2Zero,
    index: index,
    nleaves: nleaves,
    path: @nodes)
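A minimal sketch of the Poseidon2 variant, assuming 31-byte leaf preimages as accepted by the `array[31, byte]` initializer above:

```nim
var leaf: array[31, byte]
leaf[0] = 1 # arbitrary placeholder preimage

let
  tree = Poseidon2Tree.init(@[leaf, leaf, leaf]).tryGet()
  proof = tree.getProof(0).tryGet()

assert proof.verify(tree.leaves[0], tree.root.tryGet()).tryGet()
```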
@@ -7,13 +7,18 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import std/os
-
 const
-  CodexRepoNamespace* = "/repo" # repository namespace, blocks and manifests are subkeys
-  CodexBlocksNamespace* = CodexRepoNamespace / "blocks" # blocks namespace
-  CodexManifestNamespace* = CodexRepoNamespace / "manifests" # manifest namespace
-  CodexBlocksPersistNamespace* = # Cid's of persisted blocks goes here
-    CodexMetaNamespace / "blocks" / "persist"
+  # Namespaces
+  CodexMetaNamespace* = "meta" # meta info stored here
+  CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
+  CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
+  CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
+  CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
   CodexBlocksTtlNamespace* = # Cid TTL
-    CodexMetaNamespace / "blocks" / "ttl"
+    CodexMetaNamespace & "/ttl"
+  CodexBlockProofNamespace* = # Cid and Proof
+    CodexMetaNamespace & "/proof"
+  CodexDhtNamespace* = "dht" # Dht namespace
+  CodexDhtProvidersNamespace* = # Dht providers namespace
+    CodexDhtNamespace & "/providers"
+  CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
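Since keys are now composed with `&` instead of `std/os` path joins, the constants above expand to plain slash-separated strings:

```nim
# The composed namespace keys, written out:
assert CodexBlocksNamespace == "repo/blocks"
assert CodexBlocksTtlNamespace == "meta/ttl"
assert CodexDhtProvidersNamespace == "dht/providers"
```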
codex/node.nim (925 changes): diff suppressed because it is too large.
@@ -1,79 +0,0 @@
import std/sets
import pkg/upraises
import pkg/questionable
import pkg/chronicles
import ./storageproofs
import ./clock

export sets
export storageproofs

type
  Proving* = ref object
    proofs: Proofs
    clock: Clock
    loop: ?Future[void]
    slots*: HashSet[SlotId]
    onProofRequired: ?OnProofRequired
  OnProofRequired* = proc (id: SlotId) {.gcsafe, upraises:[].}

func new*(_: type Proving, proofs: Proofs, clock: Clock): Proving =
  Proving(proofs: proofs, clock: clock)

proc `onProofRequired=`*(proving: Proving, callback: OnProofRequired) =
  proving.onProofRequired = some callback

func add*(proving: Proving, id: SlotId) =
  proving.slots.incl(id)

proc getCurrentPeriod(proving: Proving): Future[Period] {.async.} =
  let periodicity = await proving.proofs.periodicity()
  return periodicity.periodOf(proving.clock.now().u256)

proc waitUntilPeriod(proving: Proving, period: Period) {.async.} =
  let periodicity = await proving.proofs.periodicity()
  await proving.clock.waitUntil(periodicity.periodStart(period).truncate(int64))

proc removeEndedContracts(proving: Proving) {.async.} =
  let now = proving.clock.now().u256
  var ended: HashSet[SlotId]
  for id in proving.slots:
    if now >= (await proving.proofs.getProofEnd(id)):
      ended.incl(id)
  proving.slots.excl(ended)

proc run(proving: Proving) {.async.} =
  try:
    while true:
      let currentPeriod = await proving.getCurrentPeriod()
      await proving.removeEndedContracts()
      for id in proving.slots:
        if (await proving.proofs.isProofRequired(id)) or
           (await proving.proofs.willProofBeRequired(id)):
          if callback =? proving.onProofRequired:
            callback(id)
      await proving.waitUntilPeriod(currentPeriod + 1)
  except CancelledError:
    discard
  except CatchableError as e:
    error "Proving failed", msg = e.msg

proc start*(proving: Proving) {.async.} =
  if proving.loop.isSome:
    return

  proving.loop = some proving.run()

proc stop*(proving: Proving) {.async.} =
  if loop =? proving.loop:
    proving.loop = Future[void].none
    if not loop.finished:
      await loop.cancelAndWait()

proc submitProof*(proving: Proving, id: SlotId, proof: seq[byte]) {.async.} =
  await proving.proofs.submitProof(id, proof)

proc subscribeProofSubmission*(proving: Proving,
                               callback: OnProofSubmitted):
                              Future[Subscription] =
  proving.proofs.subscribeProofSubmission(callback)
|
@ -18,18 +18,15 @@ type
|
||||||
clock: Clock
|
clock: Clock
|
||||||
purchases: Table[PurchaseId, Purchase]
|
purchases: Table[PurchaseId, Purchase]
|
||||||
proofProbability*: UInt256
|
proofProbability*: UInt256
|
||||||
requestExpiryInterval*: UInt256
|
|
||||||
PurchaseTimeout* = Timeout
|
PurchaseTimeout* = Timeout
|
||||||
|
|
||||||
const DefaultProofProbability = 100.u256
|
const DefaultProofProbability = 100.u256
|
||||||
const DefaultRequestExpiryInterval = (10 * 60).u256
|
|
||||||
|
|
||||||
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
|
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
|
||||||
Purchasing(
|
Purchasing(
|
||||||
market: market,
|
market: market,
|
||||||
clock: clock,
|
clock: clock,
|
||||||
proofProbability: DefaultProofProbability,
|
proofProbability: DefaultProofProbability,
|
||||||
requestExpiryInterval: DefaultRequestExpiryInterval,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
proc load*(purchasing: Purchasing) {.async.} =
|
proc load*(purchasing: Purchasing) {.async.} =
|
||||||
|
@ -47,12 +44,11 @@ proc stop*(purchasing: Purchasing) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc populate*(purchasing: Purchasing,
|
proc populate*(purchasing: Purchasing,
|
||||||
request: StorageRequest): Future[StorageRequest] {.async.} =
|
request: StorageRequest
|
||||||
|
): Future[StorageRequest] {.async.} =
|
||||||
result = request
|
result = request
|
||||||
if result.ask.proofProbability == 0.u256:
|
if result.ask.proofProbability == 0.u256:
|
||||||
result.ask.proofProbability = purchasing.proofProbability
|
result.ask.proofProbability = purchasing.proofProbability
|
||||||
if result.expiry == 0.u256:
|
|
||||||
result.expiry = (purchasing.clock.now().u256 + purchasing.requestExpiryInterval)
|
|
||||||
if result.nonce == Nonce.default:
|
if result.nonce == Nonce.default:
|
||||||
var id = result.nonce.toArray
|
var id = result.nonce.toArray
|
||||||
doAssert randomBytes(id) == 32
|
doAssert randomBytes(id) == 32
|
||||||
|
@ -60,7 +56,8 @@ proc populate*(purchasing: Purchasing,
|
||||||
result.client = await purchasing.market.getSigner()
|
result.client = await purchasing.market.getSigner()
|
||||||
|
|
||||||
proc purchase*(purchasing: Purchasing,
|
proc purchase*(purchasing: Purchasing,
|
||||||
request: StorageRequest): Future[Purchase] {.async.} =
|
request: StorageRequest
|
||||||
|
): Future[Purchase] {.async.} =
|
||||||
let request = await purchasing.populate(request)
|
let request = await purchasing.populate(request)
|
||||||
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
|
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
|
||||||
purchase.start()
|
purchase.start()
|
||||||
|
@ -72,3 +69,10 @@ func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase =
|
||||||
some purchasing.purchases[id]
|
some purchasing.purchases[id]
|
||||||
else:
|
else:
|
||||||
none Purchase
|
none Purchase
|
||||||
|
|
||||||
|
func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
|
||||||
|
var pIds: seq[PurchaseId] = @[]
|
||||||
|
for key in purchasing.purchases.keys:
|
||||||
|
pIds.add(key)
|
||||||
|
return pIds
|
||||||
|
|
||||||
|
|
|
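A sketch of the updated purchase flow under the new API; note that `expiry` is no longer defaulted in `populate`, so callers must set it on the request beforehand:

```nim
proc buy(purchasing: Purchasing, request: StorageRequest) {.async.} =
  # `populate` no longer fills in expiry, so request.expiry must already
  # be set by the caller.
  let purchase = await purchasing.purchase(request)
  await purchase.wait() # completes when finished, raises on error
```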
@@ -24,30 +24,39 @@ export Purchase
 export purchaseid
 export statemachine

-func new*(_: type Purchase,
-          requestId: RequestId,
-          market: Market,
-          clock: Clock): Purchase =
-  Purchase(
-    future: Future[void].new(),
-    requestId: requestId,
-    market: market,
-    clock: clock
-  )
+func new*(
+  _: type Purchase,
+  requestId: RequestId,
+  market: Market,
+  clock: Clock
+): Purchase =
+  ## create a new instance of a Purchase
+  ##
+  var purchase = Purchase.new()
+  {.cast(noSideEffect).}:
+    purchase.future = newFuture[void]()
+  purchase.requestId = requestId
+  purchase.market = market
+  purchase.clock = clock
+
+  return purchase

-func new*(_: type Purchase,
-          request: StorageRequest,
-          market: Market,
-          clock: Clock): Purchase =
+func new*(
+  _: type Purchase,
+  request: StorageRequest,
+  market: Market,
+  clock: Clock
+): Purchase =
+  ## Create a new purchase using the given market and clock
   let purchase = Purchase.new(request.id, market, clock)
   purchase.request = some request
   return purchase

 proc start*(purchase: Purchase) =
-  purchase.switch(PurchasePending())
+  purchase.start(PurchasePending())

 proc load*(purchase: Purchase) =
-  purchase.switch(PurchaseUnknown())
+  purchase.start(PurchaseUnknown())

 proc wait*(purchase: Purchase) {.async.} =
   await purchase.future

@@ -63,3 +72,8 @@ func error*(purchase: Purchase): ?(ref CatchableError) =
     some purchase.future.error
   else:
     none (ref CatchableError)
+
+func state*(purchase: Purchase): ?string =
+  proc description(state: State): string =
+    $state
+  purchase.query(description)
@@ -1,8 +1,12 @@
 import std/hashes
 import pkg/nimcrypto
+import ../logutils

 type PurchaseId* = distinct array[32, byte]

+logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog
+logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog
+
 proc hash*(x: PurchaseId): Hash {.borrow.}
 proc `==`*(x, y: PurchaseId): bool {.borrow.}
 proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex
@@ -1,21 +1,18 @@
-import ../utils/statemachine
+import ../utils/asyncstatemachine
 import ../market
 import ../clock
 import ../errors

 export market
 export clock
-export statemachine
+export asyncstatemachine

 type
-  Purchase* = ref object of StateMachine
+  Purchase* = ref object of Machine
     future*: Future[void]
     market*: Market
     clock*: Clock
     requestId*: RequestId
     request*: ?StorageRequest
-  PurchaseState* = ref object of AsyncState
+  PurchaseState* = ref object of State
   PurchaseError* = object of CodexError
-
-method description*(state: PurchaseState): string {.base.} =
-  raiseAssert "description not implemented for state"
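For context on this refactor: the old `StateMachine`'s `enter`/`switch` callbacks are replaced by `run` methods that return the next state. A minimal sketch of the pattern with hypothetical states (not part of the purchase code):

```nim
# Hypothetical states, shown only to illustrate the Machine/State shape.
type
  Ping = ref object of State
  Pong = ref object of State

method run(state: Ping, machine: Machine): Future[?State] {.async.} =
  return some State(Pong()) # transition by returning the next state

method run(state: Pong, machine: Machine): Future[?State] {.async.} =
  return none State         # returning none stops the machine
```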
@@ -1,20 +1,25 @@
+import pkg/metrics
+
+import ../../logutils
 import ../statemachine
-import ./error
+import ./errorhandling

-type PurchaseCancelled* = ref object of PurchaseState
+declareCounter(codex_purchases_cancelled, "codex purchases cancelled")

-method enterAsync*(state: PurchaseCancelled) {.async.} =
-  without purchase =? (state.context as Purchase):
-    raiseAssert "invalid state"
+logScope:
+  topics = "marketplace purchases cancelled"

-  try:
-    await purchase.market.withdrawFunds(purchase.requestId)
-  except CatchableError as error:
-    state.switch(PurchaseErrored(error: error))
-    return
+type PurchaseCancelled* = ref object of ErrorHandlingState
+
+method `$`*(state: PurchaseCancelled): string =
+  "cancelled"
+
+method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_cancelled.inc()
+  let purchase = Purchase(machine)
+
+  warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
+  await purchase.market.withdrawFunds(purchase.requestId)

   let error = newException(Timeout, "Purchase cancelled due to timeout")
-  state.switch(PurchaseErrored(error: error))
-
-method description*(state: PurchaseCancelled): string =
-  "cancelled"
+  purchase.future.fail(error)
@@ -1,13 +1,23 @@
+import pkg/metrics
 import ../statemachine
+import ../../utils/exceptions
+import ../../logutils
+
+declareCounter(codex_purchases_error, "codex purchases error")
+
+logScope:
+  topics = "marketplace purchases errored"

 type PurchaseErrored* = ref object of PurchaseState
   error*: ref CatchableError

-method enter*(state: PurchaseErrored) =
-  without purchase =? (state.context as Purchase):
-    raiseAssert "invalid state"
+method `$`*(state: PurchaseErrored): string =
+  "errored"

+method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_error.inc()
+  let purchase = Purchase(machine)
+
+  error "Purchasing error", error = state.error.msgDetail, requestId = purchase.requestId
+
   purchase.future.fail(state.error)
-
-method description*(state: PurchaseErrored): string =
-  "errored"
@@ -0,0 +1,9 @@
import pkg/questionable
import ../statemachine
import ./error

type
  ErrorHandlingState* = ref object of PurchaseState

method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
  some State(PurchaseErrored(error: error))
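The effect of this hook, sketched with a hypothetical state: any exception escaping `run` in a state derived from `ErrorHandlingState` is converted into a transition to `PurchaseErrored` rather than crashing the machine:

```nim
# Hypothetical state: an exception escaping `run` is caught by the
# machine and routed through `onError`, landing in PurchaseErrored.
type Flaky = ref object of ErrorHandlingState

method run(state: Flaky, machine: Machine): Future[?State] {.async.} =
  raise newException(PurchaseError, "boom")
```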
@@ -1,12 +1,16 @@
+import pkg/metrics
 import ../statemachine
 import ./error

+declareCounter(codex_purchases_failed, "codex purchases failed")
+
 type
   PurchaseFailed* = ref object of PurchaseState

-method enter*(state: PurchaseFailed) =
-  let error = newException(PurchaseError, "Purchase failed")
-  state.switch(PurchaseErrored(error: error))
-
-method description*(state: PurchaseFailed): string =
+method `$`*(state: PurchaseFailed): string =
   "failed"
+
+method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_failed.inc()
+  let error = newException(PurchaseError, "Purchase failed")
+  return some State(PurchaseErrored(error: error))
@@ -1,12 +1,20 @@
+import pkg/metrics
+
 import ../statemachine
+import ../../logutils
+
+declareCounter(codex_purchases_finished, "codex purchases finished")
+
+logScope:
+  topics = "marketplace purchases finished"

 type PurchaseFinished* = ref object of PurchaseState

-method enter*(state: PurchaseFinished) =
-  without purchase =? (state.context as Purchase):
-    raiseAssert "invalid state"
-
-  purchase.future.complete()
-
-method description*(state: PurchaseFinished): string =
+method `$`*(state: PurchaseFinished): string =
   "finished"
+
+method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_finished.inc()
+  let purchase = Purchase(machine)
+  info "Purchase finished", requestId = purchase.requestId
+  purchase.future.complete()
@@ -1,21 +1,18 @@
+import pkg/metrics
 import ../statemachine
+import ./errorhandling
 import ./submitted
-import ./error
 
-type PurchasePending* = ref object of PurchaseState
+declareCounter(codex_purchases_pending, "codex purchases pending")
 
-method enterAsync(state: PurchasePending) {.async.} =
-  without purchase =? (state.context as Purchase) and
-          request =? purchase.request:
-    raiseAssert "invalid state"
+type PurchasePending* = ref object of ErrorHandlingState
 
-  try:
-    await purchase.market.requestStorage(request)
-  except CatchableError as error:
-    state.switch(PurchaseErrored(error: error))
-    return
-
-  state.switch(PurchaseSubmitted())
-
-method description*(state: PurchasePending): string =
+method `$`*(state: PurchasePending): string =
   "pending"
+
+method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_pending.inc()
+  let purchase = Purchase(machine)
+  let request = !purchase.request
+  await purchase.market.requestStorage(request)
+  return some State(PurchaseSubmitted())
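
`PurchasePending.run` unwraps the stored request with `!purchase.request`; the `!` and `=?` operators used throughout this diff come from pkg/questionable. The force-unwrap is safe here only because the pending state is never entered before a request has been set. A small sketch of both operators:

import pkg/questionable

let answer: ?int = some 42
if value =? answer:    # `=?` binds the payload when the Option is non-empty
  echo value           # 42
echo !answer           # `!` force-unwraps, raising if the Option is empty
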
@@ -1,32 +1,41 @@
+import pkg/metrics
+
+import ../../logutils
 import ../statemachine
-import ./error
+import ./errorhandling
 import ./finished
 import ./failed
 
-type PurchaseStarted* = ref object of PurchaseState
+declareCounter(codex_purchases_started, "codex purchases started")
 
-method enterAsync*(state: PurchaseStarted) {.async.} =
-  without purchase =? (state.context as Purchase):
-    raiseAssert "invalid state"
+logScope:
+  topics = "marketplace purchases started"
+
+type PurchaseStarted* = ref object of ErrorHandlingState
+
+method `$`*(state: PurchaseStarted): string =
+  "started"
+
+method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_started.inc()
+  let purchase = Purchase(machine)
 
   let clock = purchase.clock
   let market = purchase.market
+  info "All required slots filled, purchase started", requestId = purchase.requestId
 
   let failed = newFuture[void]()
   proc callback(_: RequestId) =
     failed.complete()
   let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
 
-  let ended = clock.waitUntil(await market.getRequestEnd(purchase.requestId))
-  try:
-    let fut = await one(ended, failed)
-    if fut.id == failed.id:
-      state.switch(PurchaseFailed())
-    else:
-      state.switch(PurchaseFinished())
-    await subscription.unsubscribe()
-  except CatchableError as error:
-    state.switch(PurchaseErrored(error: error))
-
-method description*(state: PurchaseStarted): string =
-  "started"
+  # Ensure that we're past the request end by waiting an additional second
+  let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
+  let fut = await one(ended, failed)
+  await subscription.unsubscribe()
+  if fut.id == failed.id:
+    ended.cancel()
+    return some State(PurchaseFailed())
+  else:
+    failed.cancel()
+    return some State(PurchaseFinished())
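
`PurchaseStarted.run` races the request-end timer against the request-failed subscription with chronos' `one`, which completes with whichever future finishes first; the loser must then be cancelled so no timer or callback leaks. The same pattern in isolation, mirroring the `.id` comparison used above:

import pkg/chronos

proc race() {.async.} =
  let fast = sleepAsync(1.seconds)
  let slow = sleepAsync(2.seconds)
  let winner = await one(fast, slow)   # completes with the first finished future
  if winner.id == fast.id:
    slow.cancel()                      # always cancel the losing future
  else:
    fast.cancel()

waitFor race()
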
@@ -1,18 +1,30 @@
+import pkg/metrics
+
+import ../../logutils
 import ../statemachine
-import ./error
+import ./errorhandling
 import ./started
 import ./cancelled
 
-type PurchaseSubmitted* = ref object of PurchaseState
+logScope:
+  topics = "marketplace purchases submitted"
 
-method enterAsync(state: PurchaseSubmitted) {.async.} =
-  without purchase =? (state.context as Purchase) and
-          request =? purchase.request:
-    raiseAssert "invalid state"
+declareCounter(codex_purchases_submitted, "codex purchases submitted")
 
+type PurchaseSubmitted* = ref object of ErrorHandlingState
+
+method `$`*(state: PurchaseSubmitted): string =
+  "submitted"
+
+method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_submitted.inc()
+  let purchase = Purchase(machine)
+  let request = !purchase.request
   let market = purchase.market
   let clock = purchase.clock
 
+  info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId
+
   proc wait {.async.} =
     let done = newFuture[void]()
     proc callback(_: RequestId) =
@@ -22,19 +34,13 @@ method enterAsync(state: PurchaseSubmitted) {.async.} =
     await subscription.unsubscribe()
 
   proc withTimeout(future: Future[void]) {.async.} =
-    let expiry = request.expiry.truncate(int64)
+    let expiry = (await market.requestExpiresAt(request.id)) + 1
+    trace "waiting for request fulfillment or expiry", expiry
     await future.withTimeout(clock, expiry)
 
   try:
     await wait().withTimeout()
   except Timeout:
-    state.switch(PurchaseCancelled())
-    return
-  except CatchableError as error:
-    state.switch(PurchaseErrored(error: error))
-    return
+    return some State(PurchaseCancelled())
 
-  state.switch(PurchaseStarted())
-
-method description*(state: PurchaseSubmitted): string =
-  "submitted"
+  return some State(PurchaseStarted())
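
The `withTimeout` used here is purchasing's clock-based helper, assumed to raise `Timeout` once the on-chain expiry time passes. For comparison, plain chronos ships a duration-based `withTimeout` that returns `false` on timeout instead of raising; a simplified stand-in:

import pkg/chronos

proc demo() {.async.} =
  let fulfillment = sleepAsync(10.seconds)          # stand-in for wait() above
  if not await fulfillment.withTimeout(1.seconds):  # false -> timed out
    echo "request expired before all slots filled"  # the diff returns PurchaseCancelled()

waitFor demo()
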
@@ -1,37 +1,35 @@
+import pkg/metrics
 import ../statemachine
+import ./errorhandling
 import ./submitted
 import ./started
 import ./cancelled
 import ./finished
 import ./failed
-import ./error
 
-type PurchaseUnknown* = ref object of PurchaseState
+declareCounter(codex_purchases_unknown, "codex purchases unknown")
 
-method enterAsync(state: PurchaseUnknown) {.async.} =
-  without purchase =? (state.context as Purchase):
-    raiseAssert "invalid state"
-
-  try:
-    if (request =? await purchase.market.getRequest(purchase.requestId)) and
-       (requestState =? await purchase.market.getState(purchase.requestId)):
-
-      purchase.request = some request
-
-      case requestState
-      of RequestState.New:
-        state.switch(PurchaseSubmitted())
-      of RequestState.Started:
-        state.switch(PurchaseStarted())
-      of RequestState.Cancelled:
-        state.switch(PurchaseCancelled())
-      of RequestState.Finished:
-        state.switch(PurchaseFinished())
-      of RequestState.Failed:
-        state.switch(PurchaseFailed())
-
-  except CatchableError as error:
-    state.switch(PurchaseErrored(error: error))
+type PurchaseUnknown* = ref object of ErrorHandlingState
 
-method description*(state: PurchaseUnknown): string =
+method `$`*(state: PurchaseUnknown): string =
   "unknown"
+
+method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} =
+  codex_purchases_unknown.inc()
+  let purchase = Purchase(machine)
+  if (request =? await purchase.market.getRequest(purchase.requestId)) and
+     (requestState =? await purchase.market.requestState(purchase.requestId)):
+
+    purchase.request = some request
+
+    case requestState
+    of RequestState.New:
+      return some State(PurchaseSubmitted())
+    of RequestState.Started:
+      return some State(PurchaseStarted())
+    of RequestState.Cancelled:
+      return some State(PurchaseCancelled())
+    of RequestState.Finished:
+      return some State(PurchaseFinished())
+    of RequestState.Failed:
+      return some State(PurchaseFailed())
@@ -13,26 +13,30 @@ push: {.upraises: [].}
 
 
 import std/sequtils
-import std/sugar
 
 import pkg/questionable
 import pkg/questionable/results
-import pkg/chronicles
 import pkg/chronos
-import pkg/presto
-import pkg/libp2p
+import pkg/presto except toJson
+import pkg/metrics except toJson
 import pkg/stew/base10
 import pkg/stew/byteutils
 import pkg/confutils
 
+import pkg/libp2p
 import pkg/libp2p/routing_record
-import pkg/libp2pdht/discv5/spr as spr
+import pkg/codexdht/discv5/spr as spr
 
+import ../logutils
 import ../node
 import ../blocktype
 import ../conf
 import ../contracts
-import ../streams
+import ../erasure/erasure
+import ../manifest
+import ../streams/asyncstreamwrapper
+import ../stores
+import ../utils/options
 
 import ./coders
 import ./json
@@ -40,130 +44,91 @@ import ./json
 logScope:
   topics = "codex restapi"
 
+declareCounter(codex_api_uploads, "codex API uploads")
+declareCounter(codex_api_downloads, "codex API downloads")
+
 proc validate(
   pattern: string,
   value: string): int
   {.gcsafe, raises: [Defect].} =
   0
 
-proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
-  var router = RestRouter.init(validate)
-  router.api(
-    MethodGet,
-    "/api/codex/v1/connect/{peerId}") do (
-      peerId: PeerID,
-      addrs: seq[MultiAddress]) -> RestApiResponse:
-      ## Connect to a peer
-      ##
-      ## If `addrs` param is supplied, it will be used to
-      ## dial the peer, otherwise the `peerId` is used
-      ## to invoke peer discovery, if it succeeds
-      ## the returned addresses will be used to dial
-      ##
-
-      if peerId.isErr:
-        return RestApiResponse.error(
-          Http400,
-          $peerId.error())
-
-      let addresses = if addrs.isOk and addrs.get().len > 0:
-          addrs.get()
-        else:
-          without peerRecord =? (await node.findPeer(peerId.get())):
-            return RestApiResponse.error(
-              Http400,
-              "Unable to find Peer!")
-          peerRecord.addresses.mapIt(it.address)
-      try:
-        await node.connect(peerId.get(), addresses)
-        return RestApiResponse.response("Successfully connected to peer")
-      except DialFailedError as e:
-        return RestApiResponse.error(Http400, "Unable to dial peer")
-      except CatchableError as e:
-        return RestApiResponse.error(Http400, "Unknown error dialling peer")
-
-  router.api(
-    MethodGet,
-    "/api/codex/v1/download/{id}") do (
-      id: Cid, resp: HttpResponseRef) -> RestApiResponse:
-      ## Download a file from the node in a streaming
-      ## manner
-      ##
-
-      if id.isErr:
-        return RestApiResponse.error(
-          Http400,
-          $id.error())
-
-      var
-        stream: LPStream
-
-      var bytes = 0
-      try:
-        without stream =? (await node.retrieve(id.get())), error:
-          return RestApiResponse.error(Http404, error.msg)
-
-        resp.addHeader("Content-Type", "application/octet-stream")
-        await resp.prepareChunked()
-
-        while not stream.atEof:
-          var
-            buff = newSeqUninitialized[byte](BlockSize)
-            len = await stream.readOnce(addr buff[0], buff.len)
-
-          buff.setLen(len)
-          if buff.len <= 0:
-            break
-
-          bytes += buff.len
-          trace "Sending chunk", size = buff.len
-          await resp.sendChunk(addr buff[0], buff.len)
-        await resp.finish()
-      except CatchableError as exc:
-        trace "Excepting streaming blocks", exc = exc.msg
-        return RestApiResponse.error(Http500)
-      finally:
-        trace "Sent bytes", cid = id.get(), bytes
-        if not stream.isNil:
-          await stream.close()
-
-  router.rawApi(
-    MethodPost,
-    "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
-      ## Create a request for storage
-      ##
-      ## cid - the cid of a previously uploaded dataset
-      ## duration - the duration of the contract
-      ## reward - the maximum price the client is willing to pay
-
-      without cid =? cid.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg)
-
-      let body = await request.getBody()
-
-      without params =? StorageRequestParams.fromJson(body), error:
-        return RestApiResponse.error(Http400, error.msg)
-
-      let nodes = params.nodes |? 1
-      let tolerance = params.nodes |? 0
-
-      without purchaseId =? await node.requestStorage(
-        cid,
-        params.duration,
-        nodes,
-        tolerance,
-        params.reward,
-        params.expiry), error:
-
-        return RestApiResponse.error(Http500, error.msg)
-
-      return RestApiResponse.response(purchaseId.toHex)
+proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =
+  var content: seq[RestContent]
+
+  proc formatManifest(cid: Cid, manifest: Manifest) =
+    let restContent = RestContent.init(cid, manifest)
+    content.add(restContent)
+
+  await node.iterateManifests(formatManifest)
+  return %RestContentList.init(content)
+
+proc retrieveCid(
+  node: CodexNodeRef,
+  cid: Cid,
+  local: bool = true,
+  resp: HttpResponseRef): Future[RestApiResponse] {.async.} =
+  ## Download a file from the node in a streaming
+  ## manner
+  ##
+
+  var
+    stream: LPStream
+
+  var bytes = 0
+  try:
+    without stream =? (await node.retrieve(cid, local)), error:
+      if error of BlockNotFoundError:
+        return RestApiResponse.error(Http404, error.msg)
+      else:
+        return RestApiResponse.error(Http500, error.msg)
+
+    resp.addHeader("Content-Type", "application/octet-stream")
+    await resp.prepareChunked()
+
+    while not stream.atEof:
+      var
+        buff = newSeqUninitialized[byte](DefaultBlockSize.int)
+        len = await stream.readOnce(addr buff[0], buff.len)
+
+      buff.setLen(len)
+      if buff.len <= 0:
+        break
+
+      bytes += buff.len
+      await resp.sendChunk(addr buff[0], buff.len)
+    await resp.finish()
+    codex_api_downloads.inc()
+  except CatchableError as exc:
+    warn "Excepting streaming blocks", exc = exc.msg
+    return RestApiResponse.error(Http500)
+  finally:
+    info "Sent bytes", cid = cid, bytes
+    if not stream.isNil:
+      await stream.close()
+
+proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
+  let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion
+
+  router.api(
+    MethodOptions,
+    "/api/codex/v1/data") do (
+      resp: HttpResponseRef) -> RestApiResponse:
+
+      if corsOrigin =? allowedOrigin:
+        resp.setHeader("Access-Control-Allow-Origin", corsOrigin)
+        resp.setHeader("Access-Control-Allow-Methods", "POST, OPTIONS")
+        resp.setHeader("Access-Control-Allow-Headers", "content-type")
+        resp.setHeader("Access-Control-Max-Age", "86400")
+
+      resp.status = Http204
+      await resp.sendBody("")
 
   router.rawApi(
     MethodPost,
-    "/api/codex/v1/upload") do (
+    "/api/codex/v1/data") do (
     ) -> RestApiResponse:
-      ## Upload a file in a streamming manner
+      ## Upload a file in a streaming manner
       ##
 
       trace "Handling file upload"
@@ -186,18 +151,487 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
           trace "Error uploading file", exc = error.msg
           return RestApiResponse.error(Http500, error.msg)
 
+        codex_api_uploads.inc()
         trace "Uploaded file", cid
         return RestApiResponse.response($cid)
-      except CancelledError as exc:
+      except CancelledError:
+        trace "Upload cancelled error"
         return RestApiResponse.error(Http500)
       except AsyncStreamError:
+        trace "Async stream error"
         return RestApiResponse.error(Http500)
       finally:
         await reader.closeWait()
 
-      # if we got here something went wrong?
+      trace "Something went wrong error"
       return RestApiResponse.error(Http500)
 
+  router.api(
+    MethodGet,
+    "/api/codex/v1/data") do () -> RestApiResponse:
+      let json = await formatManifestBlocks(node)
+      return RestApiResponse.response($json, contentType="application/json")
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/data/{cid}") do (
+      cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
+      ## Download a file from the local node in a streaming
+      ## manner
+      if cid.isErr:
+        return RestApiResponse.error(
+          Http400,
+          $cid.error())
+
+      if corsOrigin =? allowedOrigin:
+        resp.setHeader("Access-Control-Allow-Origin", corsOrigin)
+        resp.setHeader("Access-Control-Allow-Methods", "GET, OPTIONS")
+        resp.setHeader("Access-Control-Headers", "X-Requested-With")
+        resp.setHeader("Access-Control-Max-Age", "86400")
+
+      await node.retrieveCid(cid.get(), local = true, resp=resp)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/data/{cid}/network") do (
+      cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
+      ## Download a file from the network in a streaming
+      ## manner
+      ##
+
+      if cid.isErr:
+        return RestApiResponse.error(
+          Http400,
+          $cid.error())
+
+      if corsOrigin =? allowedOrigin:
+        resp.setHeader("Access-Control-Allow-Origin", corsOrigin)
+        resp.setHeader("Access-Control-Allow-Methods", "GET, OPTIONS")
+        resp.setHeader("Access-Control-Headers", "X-Requested-With")
+        resp.setHeader("Access-Control-Max-Age", "86400")
+
+      await node.retrieveCid(cid.get(), local = false, resp=resp)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/space") do () -> RestApiResponse:
+      let json = % RestRepoStore(
+        totalBlocks: repoStore.totalBlocks,
+        quotaMaxBytes: repoStore.quotaMaxBytes,
+        quotaUsedBytes: repoStore.quotaUsedBytes,
+        quotaReservedBytes: repoStore.quotaReservedBytes
+      )
+      return RestApiResponse.response($json, contentType="application/json")
+
+proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
+  router.api(
+    MethodGet,
+    "/api/codex/v1/sales/slots") do () -> RestApiResponse:
+      ## Returns active slots for the host
+      try:
+        without contracts =? node.contracts.host:
+          return RestApiResponse.error(Http503, "Sales unavailable")
+
+        let json = %(await contracts.sales.mySlots())
+        return RestApiResponse.response($json, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/sales/slots/{slotId}") do (slotId: SlotId) -> RestApiResponse:
+      ## Returns active slot with id {slotId} for the host. Returns 404 if the
+      ## slot is not active for the host.
+
+      without contracts =? node.contracts.host:
+        return RestApiResponse.error(Http503, "Sales unavailable")
+
+      without slotId =? slotId.tryGet.catch, error:
+        return RestApiResponse.error(Http400, error.msg)
+
+      without agent =? await contracts.sales.activeSale(slotId):
+        return RestApiResponse.error(Http404, "Provider not filling slot")
+
+      let restAgent = RestSalesAgent(
+        state: agent.state() |? "none",
+        slotIndex: agent.data.slotIndex,
+        requestId: agent.data.requestId
+      )
+
+      return RestApiResponse.response(restAgent.toJson, contentType="application/json")
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
+      ## Returns storage that is for sale
+
+      try:
+        without contracts =? node.contracts.host:
+          return RestApiResponse.error(Http503, "Sales unavailable")
+
+        without avails =? (await contracts.sales.context.reservations.all(Availability)), err:
+          return RestApiResponse.error(Http500, err.msg)
+
+        let json = %avails
+        return RestApiResponse.response($json, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.rawApi(
+    MethodPost,
+    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
+      ## Add available storage to sell.
+      ## Every time Availability's offer finishes, its capacity is returned to the availability.
+      ##
+      ## totalSize - size of available storage in bytes
+      ## duration - maximum time the storage should be sold for (in seconds)
+      ## minPrice - minimum price to be paid (in amount of tokens)
+      ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens)
+
+      try:
+        without contracts =? node.contracts.host:
+          return RestApiResponse.error(Http503, "Sales unavailable")
+
+        let body = await request.getBody()
+
+        without restAv =? RestAvailability.fromJson(body), error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        let reservations = contracts.sales.context.reservations
+
+        if restAv.totalSize == 0:
+          return RestApiResponse.error(Http400, "Total size must be larger then zero")
+
+        if not reservations.hasAvailable(restAv.totalSize.truncate(uint)):
+          return RestApiResponse.error(Http422, "Not enough storage quota")
+
+        without availability =? (
+          await reservations.createAvailability(
+            restAv.totalSize,
+            restAv.duration,
+            restAv.minPrice,
+            restAv.maxCollateral)
+        ), error:
+          return RestApiResponse.error(Http500, error.msg)
+
+        return RestApiResponse.response(availability.toJson,
+                                        Http201,
+                                        contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.rawApi(
+    MethodPatch,
+    "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId) -> RestApiResponse:
+      ## Updates Availability.
+      ## The new parameters will be only considered for new requests.
+      ## Existing Requests linked to this Availability will continue as is.
+      ##
+      ## totalSize - size of available storage in bytes. When decreasing the size, then lower limit is the currently `totalSize - freeSize`.
+      ## duration - maximum time the storage should be sold for (in seconds)
+      ## minPrice - minimum price to be paid (in amount of tokens)
+      ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens)
+
+      try:
+        without contracts =? node.contracts.host:
+          return RestApiResponse.error(Http503, "Sales unavailable")
+
+        without id =? id.tryGet.catch, error:
+          return RestApiResponse.error(Http400, error.msg)
+        without keyId =? id.key.tryGet.catch, error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        let
+          body = await request.getBody()
+          reservations = contracts.sales.context.reservations
+
+        type OptRestAvailability = Optionalize(RestAvailability)
+        without restAv =? OptRestAvailability.fromJson(body), error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        without availability =? (await reservations.get(keyId, Availability)), error:
+          if error of NotExistsError:
+            return RestApiResponse.error(Http404, "Availability not found")
+
+          return RestApiResponse.error(Http500, error.msg)
+
+        if isSome restAv.freeSize:
+          return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
+
+        if size =? restAv.totalSize:
+          # we don't allow lowering the totalSize bellow currently utilized size
+          if size < (availability.totalSize - availability.freeSize):
+            return RestApiResponse.error(Http400, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize))
+
+          availability.freeSize += size - availability.totalSize
+          availability.totalSize = size
+
+        if duration =? restAv.duration:
+          availability.duration = duration
+
+        if minPrice =? restAv.minPrice:
+          availability.minPrice = minPrice
+
+        if maxCollateral =? restAv.maxCollateral:
+          availability.maxCollateral = maxCollateral
+
+        if err =? (await reservations.update(availability)).errorOption:
+          return RestApiResponse.error(Http500, err.msg)
+
+        return RestApiResponse.response(Http200)
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.rawApi(
+    MethodGet,
+    "/api/codex/v1/sales/availability/{id}/reservations") do (id: AvailabilityId) -> RestApiResponse:
+      ## Gets Availability's reservations.
+
+      try:
+        without contracts =? node.contracts.host:
+          return RestApiResponse.error(Http503, "Sales unavailable")
+
+        without id =? id.tryGet.catch, error:
+          return RestApiResponse.error(Http400, error.msg)
+        without keyId =? id.key.tryGet.catch, error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        let reservations = contracts.sales.context.reservations
+
+        if error =? (await reservations.get(keyId, Availability)).errorOption:
+          if error of NotExistsError:
+            return RestApiResponse.error(Http404, "Availability not found")
+          else:
+            return RestApiResponse.error(Http500, error.msg)
+
+        without availabilitysReservations =? (await reservations.all(Reservation, id)), err:
+          return RestApiResponse.error(Http500, err.msg)
+
+        # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
+        return RestApiResponse.response(availabilitysReservations.toJson, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
+  router.rawApi(
+    MethodPost,
+    "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
+      ## Create a request for storage
+      ##
+      ## cid - the cid of a previously uploaded dataset
+      ## duration - the duration of the request in seconds
+      ## proofProbability - how often storage proofs are required
+      ## reward - the maximum amount of tokens paid per second per slot to hosts the client is willing to pay
+      ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
+      ## nodes - number of nodes the content should be stored on
+      ## tolerance - allowed number of nodes that can be lost before content is lost
+      ## colateral - requested collateral from hosts when they fill slot
+
+      try:
+        without contracts =? node.contracts.client:
+          return RestApiResponse.error(Http503, "Purchasing unavailable")
+
+        without cid =? cid.tryGet.catch, error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        let body = await request.getBody()
+
+        without params =? StorageRequestParams.fromJson(body), error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        let nodes = params.nodes |? 1
+        let tolerance = params.tolerance |? 0
+
+        # prevent underflow
+        if tolerance > nodes:
+          return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")
+
+        let ecK = nodes - tolerance
+        let ecM = tolerance # for readability
+
+        # ensure leopard constrainst of 1 < K ≥ M
+        if ecK <= 1 or ecK < ecM:
+          return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`")
+
+        without expiry =? params.expiry:
+          return RestApiResponse.error(Http400, "Expiry required")
+
+        if expiry <= 0 or expiry >= params.duration:
+          return RestApiResponse.error(Http400, "Expiry needs value bigger then zero and smaller then the request's duration")
+
+        without purchaseId =? await node.requestStorage(
+          cid,
+          params.duration,
+          params.proofProbability,
+          nodes,
+          tolerance,
+          params.reward,
+          params.collateral,
+          expiry), error:
+
+          if error of InsufficientBlocksError:
+            return RestApiResponse.error(Http400,
+              "Dataset too small for erasure parameters, need at least " &
+              $(ref InsufficientBlocksError)(error).minSize.int & " bytes")
+
+          return RestApiResponse.error(Http500, error.msg)
+
+        return RestApiResponse.response(purchaseId.toHex)
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/storage/purchases/{id}") do (
+      id: PurchaseId) -> RestApiResponse:
+
+      try:
+        without contracts =? node.contracts.client:
+          return RestApiResponse.error(Http503, "Purchasing unavailable")
+
+        without id =? id.tryGet.catch, error:
+          return RestApiResponse.error(Http400, error.msg)
+
+        without purchase =? contracts.purchasing.getPurchase(id):
+          return RestApiResponse.error(Http404)
+
+        let json = % RestPurchase(
+          state: purchase.state |? "none",
+          error: purchase.error.?msg,
+          request: purchase.request,
+          requestId: purchase.requestId
+        )
+
+        return RestApiResponse.response($json, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/storage/purchases") do () -> RestApiResponse:
+      try:
+        without contracts =? node.contracts.client:
+          return RestApiResponse.error(Http503, "Purchasing unavailable")
+
+        let purchaseIds = contracts.purchasing.getPurchaseIds()
+        return RestApiResponse.response($ %purchaseIds, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
+  ## various node management api's
+  ##
+  router.api(
+    MethodGet,
+    "/api/codex/v1/spr") do () -> RestApiResponse:
+      ## Returns node SPR in requested format, json or text.
+      ##
+      try:
+        without spr =? node.discovery.dhtRecord:
+          return RestApiResponse.response("", status=Http503, contentType="application/json")
+
+        if $preferredContentType().get() == "text/plain":
+          return RestApiResponse.response(spr.toURI, contentType="text/plain")
+        else:
+          return RestApiResponse.response($ %* {"spr": spr.toURI}, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/peerid") do () -> RestApiResponse:
+      ## Returns node's peerId in requested format, json or text.
+      ##
+      try:
+        let id = $node.switch.peerInfo.peerId
+
+        if $preferredContentType().get() == "text/plain":
+          return RestApiResponse.response(id, contentType="text/plain")
+        else:
+          return RestApiResponse.response($ %* {"id": id}, contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  router.api(
+    MethodGet,
+    "/api/codex/v1/connect/{peerId}") do (
+      peerId: PeerId,
+      addrs: seq[MultiAddress]) -> RestApiResponse:
+      ## Connect to a peer
+      ##
+      ## If `addrs` param is supplied, it will be used to
+      ## dial the peer, otherwise the `peerId` is used
+      ## to invoke peer discovery, if it succeeds
+      ## the returned addresses will be used to dial
+      ##
+      ## `addrs` the listening addresses of the peers to dial, eg the one specified with `--listen-addrs`
+      ##
+
+      if peerId.isErr:
+        return RestApiResponse.error(
+          Http400,
+          $peerId.error())
+
+      let addresses = if addrs.isOk and addrs.get().len > 0:
+          addrs.get()
+        else:
+          without peerRecord =? (await node.findPeer(peerId.get())):
+            return RestApiResponse.error(
+              Http400,
+              "Unable to find Peer!")
+          peerRecord.addresses.mapIt(it.address)
+      try:
+        await node.connect(peerId.get(), addresses)
+        return RestApiResponse.response("Successfully connected to peer")
+      except DialFailedError:
+        return RestApiResponse.error(Http400, "Unable to dial peer")
+      except CatchableError:
+        return RestApiResponse.error(Http500, "Unknown error dialling peer")
+
+proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
+  router.api(
+    MethodGet,
+    "/api/codex/v1/debug/info") do () -> RestApiResponse:
+      ## Print rudimentary node information
+      ##
+
+      try:
+        let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
+
+        let
+          json = %*{
+            "id": $node.switch.peerInfo.peerId,
+            "addrs": node.switch.peerInfo.addrs.mapIt( $it ),
+            "repo": $conf.dataDir,
+            "spr":
+              if node.discovery.dhtRecord.isSome:
+                node.discovery.dhtRecord.get.toURI
+              else:
+                "",
+            "announceAddresses": node.discovery.announceAddrs,
+            "table": table,
+            "codex": {
+              "version": $codexVersion,
+              "revision": $codexRevision
+            }
+          }
+
+        # return pretty json for human readability
+        return RestApiResponse.response(json.pretty(), contentType="application/json")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
   router.api(
     MethodPost,
     "/api/codex/v1/debug/chronicles/loglevel") do (
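
The erasure-coding guard in the storage-request handler above reads: with ecK = nodes - tolerance data shards and ecM = tolerance parity shards, Leopard needs ecK > 1 and ecK >= ecM. A worked check with hypothetical values:

let nodes = 3
let tolerance = 1
let ecK = nodes - tolerance       # 2 data shards
let ecM = tolerance               # 1 parity shard
doAssert ecK > 1 and ecK >= ecM   # nodes=3, tolerance=1 passes the guard
                                  # nodes=3, tolerance=2 would fail: ecK = 1
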
@@ -209,87 +643,53 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
       ## `level` - chronicles log level
       ##
 
-      without res =? level and level =? res:
-        return RestApiResponse.error(Http400, "Missing log level")
-
       try:
-        {.gcsafe.}:
-          updateLogLevel(level)
-      except CatchableError as exc:
-        return RestApiResponse.error(Http500, exc.msg)
-
-      return RestApiResponse.response("")
+        without res =? level and level =? res:
+          return RestApiResponse.error(Http400, "Missing log level")
+
+        try:
+          {.gcsafe.}:
+            updateLogLevel(level)
+        except CatchableError as exc:
+          return RestApiResponse.error(Http500, exc.msg)
+
+        return RestApiResponse.response("")
+      except CatchableError as exc:
+        trace "Excepting processing request", exc = exc.msg
+        return RestApiResponse.error(Http500)
+
+  when codex_enable_api_debug_peers:
+    router.api(
+      MethodGet,
+      "/api/codex/v1/debug/peer/{peerId}") do (peerId: PeerId) -> RestApiResponse:
+
+        try:
+          trace "debug/peer start"
+          without peerRecord =? (await node.findPeer(peerId.get())):
+            trace "debug/peer peer not found!"
+            return RestApiResponse.error(
+              Http400,
+              "Unable to find Peer!")
+
+          let json = %RestPeerRecord.init(peerRecord)
+          trace "debug/peer returning peer record"
+          return RestApiResponse.response($json)
+        except CatchableError as exc:
+          trace "Excepting processing request", exc = exc.msg
+          return RestApiResponse.error(Http500)
 
-  router.api(
-    MethodGet,
-    "/api/codex/v1/debug/info") do () -> RestApiResponse:
-      ## Print rudimentary node information
-      ##
-
-      let
-        json = %*{
-          "id": $node.switch.peerInfo.peerId,
-          "addrs": node.switch.peerInfo.addrs.mapIt( $it ),
-          "repo": $conf.dataDir,
-          "spr":
-            if node.discovery.dhtRecord.isSome:
-              node.discovery.dhtRecord.get.toURI
-            else:
-              ""
-        }
-
-      return RestApiResponse.response($json)
-
-  router.api(
-    MethodGet,
-    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
-      ## Returns storage that is for sale
-
-      without contracts =? node.contracts:
-        return RestApiResponse.error(Http503, "Sales unavailable")
-
-      let json = %contracts.sales.available
-      return RestApiResponse.response($json)
-
-  router.rawApi(
-    MethodPost,
-    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
-      ## Add available storage to sell
-      ##
-      ## size - size of available storage in bytes
-      ## duration - maximum time the storage should be sold for (in seconds)
-      ## minPrice - minimum price to be paid (in amount of tokens)
-
-      without contracts =? node.contracts:
-        return RestApiResponse.error(Http503, "Sales unavailable")
-
-      let body = await request.getBody()
-
-      without availability =? Availability.fromJson(body), error:
-        return RestApiResponse.error(Http400, error.msg)
-
-      contracts.sales.add(availability)
-
-      let json = %availability
-      return RestApiResponse.response($json)
-
-  router.api(
-    MethodGet,
-    "/api/codex/v1/storage/purchases/{id}") do (
-      id: PurchaseId) -> RestApiResponse:
-
-      without contracts =? node.contracts:
-        return RestApiResponse.error(Http503, "Purchasing unavailable")
-
-      without id =? id.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg)
-
-      without purchase =? contracts.purchasing.getPurchase(id):
-        return RestApiResponse.error(Http404)
-
-      let json = %purchase
-
-      return RestApiResponse.response($json)
+proc initRestApi*(
+  node: CodexNodeRef,
+  conf: CodexConf,
+  repoStore: RepoStore,
+  corsAllowedOrigin: ?string): RestRouter =
+
+  var router = RestRouter.init(validate, corsAllowedOrigin)
+
+  initDataApi(node, repoStore, router)
+  initSalesApi(node, router)
+  initPurchasingApi(node, router)
+  initNodeApi(node, conf, router)
+  initDebugApi(node, conf, router)
 
   return router
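
With this refactor, `initRestApi` only composes the per-area routers. A wiring sketch; the `node`, `conf` and `repoStore` values are assumed to come from the usual node startup, and CORS is disabled here by passing an empty option:

import std/options
import pkg/questionable

let router = initRestApi(node, conf, repoStore, corsAllowedOrigin = none(string))
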