Mirror of https://github.com/logos-storage/logos-storage-nim.git, synced 2026-01-02 13:33:10 +00:00.

Compare commits (250 commits)
| SHA1 |
|---|
| 60861d6af8 |
| 49e801803f |
| 858101c74c |
| bd49591fff |
| 6765beee2c |
| 45fec4b524 |
| 9ac9f6ff3c |
| bd36032251 |
| be759baf4d |
| 6147a751f1 |
| ee47ca8760 |
| f791a960f2 |
| db8f866db4 |
| 7aca2f0e61 |
| 072bff5cab |
| af55a761e6 |
| e3d8d195c3 |
| d1f2e2399b |
| 8cd10edb69 |
| 6cf99e255c |
| 7eb2fb12cc |
| 352273ff81 |
| 9ef9258720 |
| 7927afe715 |
| 01615354af |
| baff902137 |
| 4d44154a40 |
| e1c397e112 |
| 7b660e3554 |
| c5e424ff1b |
| 36f64ad3e6 |
| 235c0ec842 |
| d443df441d |
| e35aec7870 |
| 93e4e0f177 |
| 6db6bf5f72 |
| b305e00160 |
| 3d2d8273e6 |
| e324ac8ca5 |
| f267d99ea8 |
| 8af73e02a9 |
| 27d807a841 |
| 85823342e9 |
| 09a8419942 |
| 7502b9ad2c |
| 3e17207a0b |
| 1bea94c390 |
| ffbbee01b1 |
| 2dd436bfb7 |
| 2e1306ac2d |
| 45ade0e3c1 |
| ca869f6dce |
| e43872d0b8 |
| d59c5b023c |
| 28a83db69e |
| 13811825b3 |
| 827d9ccccf |
| c689542579 |
| 71422f0d3d |
| 25a8077e80 |
| bfbd7264df |
| f7d06cd0e8 |
| 748830570a |
| bde98738c2 |
| 28e87d06cc |
| f144099377 |
| 19a5e05c13 |
| b39d541227 |
| d220e53fe1 |
| 2eb83a0ebb |
| 22f5150d1d |
| 0f152d333c |
| acf81d0571 |
| 7c7871ac75 |
| b92f79a654 |
| 6f62afef75 |
| 4e2a321ad5 |
| 1213377ac4 |
| e9c6d19873 |
| 5ec3b2b027 |
| 0ec52abc98 |
| 0032e60398 |
| 7deeb7d2b3 |
| 60b6996eb0 |
| a0d6fbaf02 |
| 709a8648fd |
| 110147d8ef |
| 3a312596bf |
| 9d7b521519 |
| 54177e9fbf |
| 75db491d84 |
| f1b84dc6d1 |
| a5db757de3 |
| a0ddcef08d |
| 1cac3e2a11 |
| 2538ff8da3 |
| 17d3bb55cf |
| 703921df32 |
| 2a3a29720f |
| eb09e610d5 |
| 7065718e09 |
| fab5e16afd |
| 16dce0fc43 |
| a609baea26 |
| f6aee4ff6e |
| 44981d24d0 |
| 04327a3986 |
| 87590f43ce |
| 1052dad30c |
| 2298a0bf81 |
| 0107eb06fe |
| 6e73338425 |
| 5af3477793 |
| dc08ff8840 |
| 25c84f4e0e |
| c65148822e |
| 45e97513a7 |
| 20f6fef7ab |
| bbe1f09cd7 |
| 11888e78d7 |
| 8880ad9cd4 |
| dfa90a9981 |
| 17d3f99f45 |
| e62a09d9b1 |
| c05eec422c |
| 54d499be41 |
| 0595723f66 |
| 58a962add8 |
| 962fc1cd95 |
| f6c792de79 |
| 1c4184f29c |
| e5df8c50d3 |
| d114e6e942 |
| 893f6d02ab |
| 389ab59aa7 |
| ac12de37b2 |
| 833e253baa |
| 2ad7c31c85 |
| 4606726e27 |
| 68ad804f9e |
| 39e8e6e6fa |
| 0cffa02748 |
| 3dc7224330 |
| f25c555d59 |
| caed3c07a3 |
| 74c46b3651 |
| 407f77871f |
| 145aa5d84a |
| 0badcb662a |
| 4b99b58645 |
| 6ff4d30b43 |
| 8645d336ff |
| 20bb5e5a38 |
| c498e2f53b |
| f0f04ddf1d |
| bef1160799 |
| b0cc27f563 |
| 5f2ba14281 |
| 6d415b0ace |
| 01fb685bf6 |
| 92a0eda79a |
| 1f49f86131 |
| 7c804b0ec9 |
| 19af79786e |
| d10072bf67 |
| da234d503b |
| 855b973811 |
| 0c6784da7e |
| fb4577f25c |
| f51eae30fe |
| 8e29939cf8 |
| 921159f87f |
| 63e54d135c |
| 0707446cdd |
| 3f510eb501 |
| ab019a08ae |
| 21249968d4 |
| d47ce38894 |
| 5c6bbb0cee |
| 024c75e4f9 |
| d12de20868 |
| 29433bad9a |
| 6038fb456e |
| 71b8a95d12 |
| 9b7f3f4aaf |
| d7ae8b734a |
| a6f0311b50 |
| 2151e02838 |
| 86257054ee |
| 96459188c9 |
| b8dd68063f |
| 2b5a40559e |
| 942f940c92 |
| a2ac7453fa |
| 2fb7031ec6 |
| bcc1468130 |
| 40068512a6 |
| 0157ca4c57 |
| 3a2d0926f1 |
| 562e4329e2 |
| 436baef20a |
| 7c33473c88 |
| 93960033f4 |
| ffa203b04f |
| 3699601393 |
| 1fe3abfd03 |
| 7e0ec3c233 |
| 44f21b8a68 |
| f02de34f77 |
| 17f0988fc7 |
| 0ea8cfb085 |
| 566db2fa30 |
| 91e4e368de |
| 4c51dca299 |
| b5ee57afd7 |
| b491906005 |
| 5ace105a66 |
| e1a02c8b76 |
| 264bfa17f5 |
| dcfc249945 |
| 8b374d5acb |
| 4b9336ec07 |
| 098c4bb6e9 |
| 032d7e7495 |
| 4e8630791a |
| e8e9820d5b |
| 1e2ad95659 |
| e017b05cf1 |
| 63ee4ca0f9 |
| 15303125f6 |
| eeb048e386 |
| bb9a5fbe92 |
| 2771ca6319 |
| b7934fc686 |
| 4f56f2af26 |
| fbce240e3d |
| 8f740b42e6 |
| 3ae73197c2 |
| fbf1b51f57 |
| 67facb4b2a |
| b004ca75f6 |
| 11eaebfd77 |
| 5b131611ac |
| a79bbd459a |
| a55b676a42 |
| ad53066131 |
| 8138ef5afd |
| 4619260dc1 |
| ec7faa21b5 |
| 1a57341b7d |
.git-blame-ignore-revs (new file, 2 lines)

@@ -0,0 +1,2 @@
+# Formatted with nph v0.6.1-0-g0d8000e
+e5df8c50d3b6e70e6eec1ff031657d2b7bb6fe63
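The ignore-revs file only takes effect once git is told about it; a minimal sketch of how a contributor would wire it up locally, using standard git options (the blamed file below is just an example):

```bash
# Tell git blame to skip the bulk nph-formatting commit listed in the file.
git config blame.ignoreRevsFile .git-blame-ignore-revs

# Subsequent blames attribute lines to their real authors,
# not to the formatting commit.
git blame Makefile
```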
.github/actions/nimbus-build-system/action.yml (72 changed lines, vendored)

@@ -11,13 +11,16 @@ inputs:
     default: "amd64"
   nim_version:
     description: "Nim version"
-    default: "version-1-6"
+    default: "v2.0.14"
   rust_version:
     description: "Rust version"
-    default: "1.78.0"
+    default: "1.79.0"
   shell:
     description: "Shell to run commands in"
     default: "bash --noprofile --norc -e -o pipefail"
+  coverage:
+    description: "True if the process is used for coverage"
+    default: false
 runs:
   using: "composite"
   steps:
@@ -31,8 +34,8 @@ runs:
       if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
       shell: ${{ inputs.shell }} {0}
       run: |
-        sudo apt-fast update -qq
-        sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
+        sudo apt-get update -qq
+        sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
           --no-install-recommends -yq lcov

     - name: APT (Linux i386)
@@ -40,8 +43,8 @@ runs:
       shell: ${{ inputs.shell }} {0}
       run: |
         sudo dpkg --add-architecture i386
-        sudo apt-fast update -qq
-        sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
+        sudo apt-get update -qq
+        sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
          --no-install-recommends -yq gcc-multilib g++-multilib

     - name: Homebrew (macOS)
@@ -78,11 +81,48 @@ runs:
         mingw-w64-i686-ntldd-git
         mingw-w64-i686-rust

-    - name: MSYS2 (Windows All) - Downgrade to gcc 13
-      if: inputs.os == 'windows'
-      shell: ${{ inputs.shell }} {0}
-      run: |
-        pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
+    - name: Install gcc 14 on Linux
+      # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
+      if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        # Skip for older Ubuntu versions
+        if [[ $(lsb_release -r | awk -F '[^0-9]+' '{print $2}') -ge 24 ]]; then
+          # Install GCC-14
+          sudo apt-get update -qq
+          sudo apt-get install -yq gcc-14
+          # Add GCC-14 to alternatives
+          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
+          # Set GCC-14 as the default
+          sudo update-alternatives --set gcc /usr/bin/gcc-14
+        fi
+
+    - name: Install ccache on Linux/Mac
+      if: inputs.os == 'linux' || inputs.os == 'macos'
+      uses: hendrikmuhs/ccache-action@v1.2
+      with:
+        create-symlink: true
+        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
+        evict-old-files: 7d
+
+    - name: Install ccache on Windows
+      if: inputs.os == 'windows'
+      uses: hendrikmuhs/ccache-action@v1.2
+      with:
+        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
+        evict-old-files: 7d
+
+    - name: Enable ccache on Windows
+      if: inputs.os == 'windows'
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        CCACHE_DIR=$(dirname $(which ccache))/ccached
+        mkdir ${CCACHE_DIR}
+        ln -s $(which ccache) ${CCACHE_DIR}/gcc.exe
+        ln -s $(which ccache) ${CCACHE_DIR}/g++.exe
+        ln -s $(which ccache) ${CCACHE_DIR}/cc.exe
+        ln -s $(which ccache) ${CCACHE_DIR}/c++.exe
+        echo "export PATH=${CCACHE_DIR}:\$PATH" >> $HOME/.bash_profile # prefix path in MSYS2

     - name: Derive environment variables
       shell: ${{ inputs.shell }} {0}
@@ -141,8 +181,11 @@ runs:
         llvm_bin_dir="${llvm_dir}/bin"
         llvm_lib_dir="${llvm_dir}/lib"
         echo "${llvm_bin_dir}" >> ${GITHUB_PATH}
+        # Make sure ccache has precedence (GITHUB_PATH is appending before)
+        echo "$(brew --prefix)/opt/ccache/libexec" >> ${GITHUB_PATH}
+        echo $PATH
         echo "LDFLAGS=${LDFLAGS} -L${libomp_lib_dir} -L${llvm_lib_dir} -Wl,-rpath,${llvm_lib_dir}" >> ${GITHUB_ENV}
-        NIMFLAGS="${NIMFLAGS} $(quote "-d:LeopardCmakeFlags='-DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=${llvm_bin_dir}/clang -DCMAKE_CXX_COMPILER=${llvm_bin_dir}/clang++' -d:LeopardExtraCompilerlags='-fopenmp' -d:LeopardExtraLinkerFlags='-fopenmp -L${libomp_lib_dir}'")"
+        NIMFLAGS="${NIMFLAGS} $(quote "-d:LeopardCmakeFlags='-DCMAKE_BUILD_TYPE=Release' -d:LeopardExtraCompilerFlags='-fopenmp' -d:LeopardExtraLinkerFlags='-fopenmp -L${libomp_lib_dir}'")"
         echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV
       fi

@@ -159,6 +202,7 @@ runs:
     - name: Restore Nim toolchain binaries from cache
       id: nim-cache
       uses: actions/cache@v4
+      if : ${{ inputs.coverage != 'true' }}
      with:
        path: NimBinaries
        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
@@ -168,9 +212,17 @@ runs:
       shell: ${{ inputs.shell }} {0}
       run: echo "NIM_COMMIT=${{ inputs.nim_version }}" >> ${GITHUB_ENV}

-    - name: Build Nim and Codex dependencies
+    - name: MSYS2 (Windows All) - Disable git symbolic links (since miniupnp 2.2.5)
+      if: inputs.os == 'windows'
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        git config --global core.symlinks false
+
+    - name: Build Nim and Logos Storage dependencies
       shell: ${{ inputs.shell }} {0}
       run: |
+        which gcc
+        gcc --version
         make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update
         echo
         ./env.sh nim --version
.github/workflows/Readme.md (28 changed lines, vendored)

@@ -3,12 +3,14 @@ Tips for shorter build times

 ### Runner availability ###

-Currently, the biggest bottleneck when optimizing workflows is the availability
-of Windows and macOS runners. Therefore, anything that reduces the time spent in
-Windows or macOS jobs will have a positive impact on the time waiting for
-runners to become available. The usage limits for Github Actions are [described
-here][limits]. You can see a breakdown of runner usage for your jobs in the
-Github Actions tab ([example][usage]).
+When running on the Github free, pro or team plan, the bottleneck when
+optimizing workflows is the availability of macOS runners. Therefore, anything
+that reduces the time spent in macOS jobs will have a positive impact on the
+time waiting for runners to become available. On the Github enterprise plan,
+this is not the case and you can more freely use parallelization on multiple
+runners. The usage limits for Github Actions are [described here][limits]. You
+can see a breakdown of runner usage for your jobs in the Github Actions tab
+([example][usage]).

 ### Windows is slow ###

@@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.

 Breaking up a long build job into several jobs that you run in parallel can have
 a positive impact on the wall clock time that a workflow runs. For instance, you
-might consider running unit tests and integration tests in parallel. Keep in
-mind however that availability of macOS and Windows runners is the biggest
-bottleneck. If you split a Windows job into two jobs, you now need to wait for
-two Windows runners to become available! Therefore parallelization often only
-makes sense for Linux jobs.
+might consider running unit tests and integration tests in parallel. When
+running on the Github free, pro or team plan, keep in mind that availability of
+macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
+need to wait for two macOS runners to become available.

 ### Refactoring ###

@@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
 to know whether you introduced a failure on all platforms, or only on a single
 one. You might be tempted to disable fail-fast, but keep in mind that this keeps
 runners busy for longer on a workflow that you know is going to fail anyway.
-Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
+Consequent runs will therefore take longer to start. Fail fast is most likely
+better for overall development speed.

-[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
+[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
 [composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
 [reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
 [cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache
.github/workflows/ci-reusable.yml (22 changed lines, vendored)

@@ -24,9 +24,9 @@ jobs:
       run:
         shell: ${{ matrix.shell }} {0}

-    name: '${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.tests }}'
+    name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
     runs-on: ${{ matrix.builder }}
-    timeout-minutes: 100
+    timeout-minutes: 90
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
@@ -38,28 +38,32 @@ jobs:
         uses: ./.github/actions/nimbus-build-system
         with:
           os: ${{ matrix.os }}
           cpu: ${{ matrix.cpu }}
           shell: ${{ matrix.shell }}
           nim_version: ${{ matrix.nim_version }}
+          coverage: false

       ## Part 1 Tests ##
       - name: Unit tests
         if: matrix.tests == 'unittest' || matrix.tests == 'all'
         run: make -j${ncpu} test

       # workaround for https://github.com/NomicFoundation/hardhat/issues/3877
       - name: Setup Node.js
         if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
         uses: actions/setup-node@v4
         with:
-          node-version: 18.15
+          node-version: 22

-      - name: Start Ethereum node with Codex contracts
+      - name: Start Ethereum node with Logos Storage contracts
         if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
-        working-directory: vendor/codex-contracts-eth
+        working-directory: vendor/logos-storage-contracts-eth
         env:
           MSYS2_PATH_TYPE: inherit
         run: |
-          npm install
+          npm ci
           npm start &
+          # Wait for the contracts to be deployed
+          sleep 5

       ## Part 2 Tests ##
       - name: Contract tests
@@ -69,13 +73,15 @@ jobs:
       ## Part 3 Tests ##
       - name: Integration tests
         if: matrix.tests == 'integration' || matrix.tests == 'all'
+        env:
+          CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
         run: make -j${ncpu} testIntegration

       - name: Upload integration tests log files
         uses: actions/upload-artifact@v4
         if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
         with:
-          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
+          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
           path: tests/integration/logs/
           retention-days: 1
.github/workflows/ci.yml (39 changed lines, vendored)

@@ -9,31 +9,28 @@ on:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: pinned
+  nim_version: v2.2.4

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
   cancel-in-progress: true

 jobs:
   matrix:
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
-      - name: Compute matrix
-        id: matrix
-        uses: fabiocaccamo/create-matrix-action@v4
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
+      - name: Checkout sources
+        uses: actions/checkout@v4
+      - name: Compute matrix
+        id: matrix
+        run: |
+          echo 'matrix<<EOF' >> $GITHUB_OUTPUT
+          tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
+          echo 'EOF' >> $GITHUB_OUTPUT

   build:
     needs: matrix
@@ -42,8 +39,21 @@ jobs:
       matrix: ${{ needs.matrix.outputs.matrix }}
       cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}

+  linting:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request'
+    steps:
+      - uses: actions/checkout@v4
+      - name: Check `nph` formatting
+        uses: arnetheduck/nph-action@v1
+        with:
+          version: 0.6.1
+          options: "codex/ tests/"
+          fail: true
+          suggest: true
+
   coverage:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
@@ -56,6 +66,7 @@ jobs:
         with:
           os: linux
           nim_version: ${{ env.nim_version }}
+          coverage: true

       - name: Generate coverage data
         run: |
.github/workflows/conventional-commits.yml (new file, 19 lines, vendored)

@@ -0,0 +1,19 @@
+name: Conventional Commits Linting
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+  workflow_dispatch:
+  merge_group:
+
+jobs:
+  pr-title:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request'
+    steps:
+      - name: PR Conventional Commit Validation
+        uses: ytanikin/pr-conventional-commits@1.4.1
+        with:
+          task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'
.github/workflows/docker-dist-tests.yml (deleted, 33 lines, vendored)

@@ -1,33 +0,0 @@
-name: Docker - Dist-Tests
-
-
-on:
-  push:
-    branches:
-      - master
-    tags:
-      - 'v*.*.*'
-    paths-ignore:
-      - '**/*.md'
-      - '.gitignore'
-      - '.github/**'
-      - '!.github/workflows/docker-dist-tests.yml'
-      - '!.github/workflows/docker-reusable.yml'
-      - 'docker/**'
-      - '!docker/codex.Dockerfile'
-      - '!docker/docker-entrypoint.sh'
-  workflow_dispatch:
-
-
-jobs:
-  build-and-push:
-    name: Build and Push
-    uses: ./.github/workflows/docker-reusable.yml
-    with:
-      nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
-      nat_ip_auto: true
-      tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
-      tag_suffix: dist-tests
-      continuous_tests_list: PeersTest HoldMyBeerTest
-      continuous_tests_duration: 12h
-    secrets: inherit
.github/workflows/docker-reusable.yml (147 changed lines, vendored)

@@ -34,6 +34,11 @@ on:
       description: Set latest tag for Docker images
       required: false
       type: boolean
+    tag_stable:
+      default: false
+      description: Set stable tag for Docker images
+      required: false
+      type: boolean
     tag_sha:
       default: true
       description: Set Git short commit as Docker tag
@@ -54,6 +59,19 @@ on:
       description: Continuous Tests duration
       required: false
       type: string
+    run_release_tests:
+      description: Run Release tests
+      required: false
+      type: string
+      default: false
+    contract_image:
+      description: Specifies compatible smart contract image
+      required: false
+      type: string
+  outputs:
+    codex_image:
+      description: Logos Storage Docker image tag
+      value: ${{ jobs.publish.outputs.codex_image }}

@@ -64,19 +82,33 @@ env:
   NIMFLAGS: ${{ inputs.nimflags }}
   NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
   TAG_LATEST: ${{ inputs.tag_latest }}
+  TAG_STABLE: ${{ inputs.tag_stable }}
   TAG_SHA: ${{ inputs.tag_sha }}
   TAG_SUFFIX: ${{ inputs.tag_suffix }}
+  CONTRACT_IMAGE: ${{ inputs.contract_image }}
   # Tests
-  CONTINUOUS_TESTS_SOURCE: codex-storage/cs-codex-dist-tests
-  CONTINUOUS_TESTS_BRANCH: master
+  TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
+  TESTS_BRANCH: master
   CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
   CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
   CONTINUOUS_TESTS_NAMEPREFIX: c-tests-ci


 jobs:
+  # Compute variables
+  compute:
+    name: Compute build ID
+    runs-on: ubuntu-latest
+    outputs:
+      build_id: ${{ steps.build_id.outputs.build_id }}
+    steps:
+      - name: Generate unique build id
+        id: build_id
+        run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
+
   # Build platform specific image
   build:
+    needs: compute
     strategy:
       fail-fast: true
       matrix:
@@ -89,11 +121,11 @@ jobs:
         - target:
             os: linux
             arch: amd64
-          builder: ubuntu-22.04
+          builder: ubuntu-24.04
         - target:
             os: linux
             arch: arm64
-          builder: buildjet-4vcpu-ubuntu-2204-arm
+          builder: ubuntu-24.04-arm

     name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }}
     runs-on: ${{ matrix.builder }}
@@ -103,11 +135,19 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4

+      - name: Docker - Variables
+        run: |
+          # Create contract label for compatible contract image if specified
+          if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
+            echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
+          fi
+
       - name: Docker - Meta
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.DOCKER_REPO }}
+          labels: ${{ env.CONTRACT_LABEL }}

       - name: Docker - Set up Buildx
         uses: docker/setup-buildx-action@v3
@@ -142,7 +182,7 @@ jobs:
       - name: Docker - Upload digest
         uses: actions/upload-artifact@v4
         with:
-          name: digests-${{ matrix.target.arch }}
+          name: digests-${{ needs.compute.outputs.build_id }}-${{ matrix.target.arch }}
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1
@@ -154,35 +194,41 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.meta.outputs.version }}
-    needs: build
+      codex_image: ${{ steps.image_tag.outputs.codex_image }}
+    needs: [build, compute]
     steps:
       - name: Docker - Variables
         run: |
-          # Adjust custom suffix when set and
+          # Adjust custom suffix when set
           if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
-            echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
+            echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
           fi
           # Disable SHA tags on tagged release
           if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
-            echo "TAG_SHA=false" >>$GITHUB_ENV
+            echo "TAG_SHA=false" >> $GITHUB_ENV
           fi
           # Handle latest and latest-custom using raw
           if [[ ${{ env.TAG_SHA }} == "false" ]]; then
-            echo "TAG_LATEST=false" >>$GITHUB_ENV
-            echo "TAG_RAW=true" >>$GITHUB_ENV
+            echo "TAG_LATEST=false" >> $GITHUB_ENV
+            echo "TAG_RAW=true" >> $GITHUB_ENV
             if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
-              echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
+              echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
             else
-              echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
+              echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
             fi
           else
-            echo "TAG_RAW=false" >>$GITHUB_ENV
+            echo "TAG_RAW=false" >> $GITHUB_ENV
           fi

+          # Create contract label for compatible contract image if specified
+          if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
+            echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
+          fi
+
       - name: Docker - Download digests
         uses: actions/download-artifact@v4
         with:
-          pattern: digests-*
+          pattern: digests-${{ needs.compute.outputs.build_id }}-*
           merge-multiple: true
           path: /tmp/digests

@@ -194,12 +240,14 @@ jobs:
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.DOCKER_REPO }}
+          labels: ${{ env.CONTRACT_LABEL }}
           flavor: |
             latest=${{ env.TAG_LATEST }}
             suffix=${{ env.TAG_SUFFIX }},onlatest=true
           tags: |
             type=semver,pattern={{version}}
             type=raw,enable=${{ env.TAG_RAW }},value=latest
+            type=raw,enable=${{ env.TAG_STABLE }},value=stable
             type=sha,enable=${{ env.TAG_SHA }}

       - name: Docker - Login to Docker Hub
@@ -214,54 +262,81 @@ jobs:
           docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
             $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)

+      - name: Docker - Image tag
+        id: image_tag
+        run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
+
       - name: Docker - Inspect image
-        run: |
-          docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
+        run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}


-  # Compute Continuous Tests inputs
+  # Compute Tests inputs
   compute-tests-inputs:
-    name: Compute Continuous Tests list
-    if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
+    name: Compute Tests inputs
+    if: ${{ inputs.continuous_tests_list != '' || inputs.run_release_tests == 'true' }}
     runs-on: ubuntu-latest
     needs: publish
     outputs:
       source: ${{ steps.compute.outputs.source }}
-      branch: ${{ steps.compute.outputs.branch }}
+      branch: ${{ env.TESTS_BRANCH }}
+      workflow_source: ${{ env.TESTS_SOURCE }}
       codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }}
-      nameprefix: ${{ steps.compute.outputs.nameprefix }}
-      continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
-      continuous_tests_duration: ${{ steps.compute.outputs.continuous_tests_duration }}
-      continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
-      workflow_source: ${{ steps.compute.outputs.workflow_source }}
     steps:
-      - name: Compute Continuous Tests list
+      - name: Compute Tests inputs
         id: compute
         run: |
-          echo "source=${{ format('{0}/{1}', github.server_url, env.CONTINUOUS_TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
-          echo "branch=${{ env.CONTINUOUS_TESTS_BRANCH }}" >> "$GITHUB_OUTPUT"
+          echo "source=${{ format('{0}/{1}', github.server_url, env.TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
           echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"
+
+  # Compute Continuous Tests inputs
+  compute-continuous-tests-inputs:
+    name: Compute Continuous Tests inputs
+    if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
+    runs-on: ubuntu-latest
+    needs: compute-tests-inputs
+    outputs:
+      nameprefix: ${{ steps.compute.outputs.nameprefix }}
+      continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
+      continuous_tests_duration: ${{ env.CONTINUOUS_TESTS_DURATION }}
+      continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
+    steps:
+      - name: Compute Continuous Tests inputs
+        id: compute
+        run: |
           echo "nameprefix=$(awk '{ print tolower($0) }' <<< ${{ env.CONTINUOUS_TESTS_NAMEPREFIX }})" >> "$GITHUB_OUTPUT"
           echo "continuous_tests_list=$(jq -cR 'split(" ")' <<< '${{ env.CONTINUOUS_TESTS_LIST }}')" >> "$GITHUB_OUTPUT"
-          echo "continuous_tests_duration=${{ env.CONTINUOUS_TESTS_DURATION }}" >> "$GITHUB_OUTPUT"
-          echo "workflow_source=${{ env.CONTINUOUS_TESTS_SOURCE }}" >> "$GITHUB_OUTPUT"

   # Run Continuous Tests
-  run-tests:
+  run-continuous-tests:
     name: Run Continuous Tests
-    needs: [publish, compute-tests-inputs]
+    needs: [compute-tests-inputs, compute-continuous-tests-inputs]
     strategy:
       max-parallel: 1
       matrix:
-        tests: ${{ fromJSON(needs.compute-tests-inputs.outputs.continuous_tests_list) }}
-    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
+        tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
+    uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
     with:
       source: ${{ needs.compute-tests-inputs.outputs.source }}
       branch: ${{ needs.compute-tests-inputs.outputs.branch }}
       codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
-      nameprefix: ${{ needs.compute-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
+      nameprefix: ${{ needs.compute-continuous-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-continuous-tests-inputs.outputs.continuous_tests_duration }}
       tests_filter: ${{ matrix.tests }}
       tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
       workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
     secrets: inherit
+
+  # Run Release Tests
+  run-release-tests:
+    name: Run Release Tests
+    needs: [compute-tests-inputs]
+    if: ${{ inputs.run_release_tests == 'true' }}
+    uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
+    with:
+      source: ${{ needs.compute-tests-inputs.outputs.source }}
+      branch: ${{ needs.compute-tests-inputs.outputs.branch }}
+      codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
+      workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
+    secrets: inherit
.github/workflows/docker.yml (20 changed lines, vendored)

@@ -18,11 +18,27 @@ on:
       - '!docker/docker-entrypoint.sh'
   workflow_dispatch:


 jobs:
+  get-contracts-hash:
+    runs-on: ubuntu-latest
+    outputs:
+      hash: ${{ steps.get-hash.outputs.hash }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+
+      - name: Get submodule short hash
+        id: get-hash
+        run: |
+          hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
+          echo "hash=$hash" >> $GITHUB_OUTPUT
+
   build-and-push:
     name: Build and Push
     uses: ./.github/workflows/docker-reusable.yml
+    needs: get-contracts-hash
     with:
       tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
-    secrets: inherit
+      tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
+      contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
+    secrets: inherit
.github/workflows/docs.yml (29 changed lines, vendored)

@@ -2,17 +2,17 @@ name: OpenAPI

 on:
   push:
-    branches:
-      - 'master'
+    tags:
+      - "v*.*.*"
     paths:
-      - 'openapi.yaml'
-      - '.github/workflows/docs.yml'
+      - "openapi.yaml"
+      - ".github/workflows/docs.yml"
   pull_request:
     branches:
-      - '**'
+      - "**"
     paths:
-      - 'openapi.yaml'
-      - '.github/workflows/docs.yml'
+      - "openapi.yaml"
+      - ".github/workflows/docs.yml"

 # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
 permissions:
@@ -28,38 +28,39 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          fetch-depth: '0'
+          fetch-depth: 0

       - uses: actions/setup-node@v4
         with:
           node-version: 18

       - name: Lint OpenAPI
         shell: bash
         run: npx @redocly/cli lint openapi.yaml

   deploy:
     name: Deploy
     runs-on: ubuntu-latest
-    if: github.ref == 'refs/heads/master'
+    if: startsWith(github.ref, 'refs/tags/')
     steps:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          fetch-depth: '0'
+          fetch-depth: 0

       - uses: actions/setup-node@v4
         with:
           node-version: 18

       - name: Build OpenAPI
         shell: bash
-        run: npx @redocly/cli build-docs openapi.yaml --output "openapi/index.html" --title "Codex API"
+        run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"

+      - name: Build Postman Collection
+        run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false
+
       - name: Upload artifact
         uses: actions/upload-pages-artifact@v3
         with:
-          path: './openapi'
+          path: openapi

       - name: Deploy to GitHub Pages
         uses: actions/deploy-pages@v4
.github/workflows/nim-matrix.yml (16 changed lines, vendored)

@@ -8,19 +8,21 @@ env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned

-jobs:
+jobs:
   matrix:
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
-      - name: Compute matrix
-        id: matrix
-        uses: fabiocaccamo/create-matrix-action@v4
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+      - name: Checkout sources
+        uses: actions/checkout@v4
+      - name: Compute matrix
+        id: matrix
+        run: |
+          echo 'matrix<<EOF' >> $GITHUB_OUTPUT
+          tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
+          echo 'EOF' >> $GITHUB_OUTPUT

   build:
     needs: matrix
.github/workflows/release.yml (70 changed lines, vendored)

@@ -4,13 +4,15 @@ on:
   push:
     tags:
       - 'v*.*.*'
+    branches:
+      - master
   workflow_dispatch:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned
-  rust_version: 1.78.0
-  codex_binary_base: codex
+  rust_version: 1.79.0
+  storage_binary_base: storage
   cirdl_binary_base: cirdl
   build_dir: build
   nim_flags: ''
@@ -25,14 +27,13 @@ jobs:
     steps:
       - name: Compute matrix
         id: matrix
-        uses: fabiocaccamo/create-matrix-action@v4
+        uses: fabiocaccamo/create-matrix-action@v5
         with:
           matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {arm64}, builder {buildjet-4vcpu-ubuntu-2204-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
+            os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}

 # Build
 build:
@@ -72,18 +73,18 @@ jobs:
         windows*) os_name="windows" ;;
       esac
       github_ref_name="${GITHUB_REF_NAME/\//-}"
-      codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
+      storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
       cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
       if [[ ${os_name} == "windows" ]]; then
-        codex_binary="${codex_binary}.exe"
+        storage_binary="${storage_binary}.exe"
         cirdl_binary="${cirdl_binary}.exe"
       fi
-      echo "codex_binary=${codex_binary}" >>$GITHUB_ENV
+      echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
       echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV

   - name: Release - Build
     run: |
-      make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}"
+      make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
       make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"

   - name: Release - Libraries
@@ -94,11 +95,11 @@ jobs:
       done
       fi

-  - name: Release - Upload codex build artifacts
+  - name: Release - Upload Logos Storage build artifacts
     uses: actions/upload-artifact@v4
     with:
-      name: release-${{ env.codex_binary }}
-      path: ${{ env.build_dir }}/${{ env.codex_binary_base }}*
+      name: release-${{ env.storage_binary }}
+      path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
       retention-days: 30

   - name: Release - Upload cirdl build artifacts
@@ -138,7 +139,7 @@ jobs:
       }

       # Compress and prepare
-      for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do
+      for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
         if [[ "${file}" == *".exe"* ]]; then

           # Windows - binary only
@@ -170,6 +171,34 @@ jobs:
       path: /tmp/release/
       retention-days: 30

+  - name: Release - Upload to the cloud
+    env:
+      s3_endpoint: ${{ secrets.S3_ENDPOINT }}
+      s3_bucket: ${{ secrets.S3_BUCKET }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+    run: |
+      # Variables
+      branch="${GITHUB_REF_NAME/\//-}"
+      folder="/tmp/release"
+
+      # Tagged releases
+      if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
+        aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
+        echo "${branch}" > "${folder}"/latest
+        aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
+        rm -f "${folder}"/latest
+
+      # master branch
+      elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
+        aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
+
+      # Custom branch
+      else
+        aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
+      fi
+
   - name: Release
     uses: softprops/action-gh-release@v2
     if: startsWith(github.ref, 'refs/tags/')
@@ -177,3 +206,12 @@ jobs:
       files: |
         /tmp/release/*
       make_latest: true
+
+  - name: Generate Python SDK
+    uses: peter-evans/repository-dispatch@v3
+    if: startsWith(github.ref, 'refs/tags/')
+    with:
+      token: ${{ secrets.DISPATCH_PAT }}
+      repository: logos-storage/logos-storage-py-api-client
+      event-type: generate
+      client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'
.gitignore (6 changed lines, vendored)

@@ -5,9 +5,13 @@

 !LICENSE*
 !Makefile
+!Jenkinsfile

 nimcache/

+# Executables when using nix will be stored in result/ directory
+result/
+
 # Executables shall be put in an ignored build/ directory
 build/

@@ -41,3 +45,5 @@ docker/prometheus-data
 .DS_Store
 nim.cfg
 tests/integration/logs
+
+data/
.gitmodules (58 changed lines, vendored)

@@ -37,22 +37,17 @@
 	path = vendor/nim-nitro
 	url = https://github.com/status-im/nim-nitro.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/questionable"]
 	path = vendor/questionable
 	url = https://github.com/status-im/questionable.git
 	ignore = untracked
-	branch = master
-[submodule "vendor/upraises"]
-	path = vendor/upraises
-	url = https://github.com/markspanbroek/upraises.git
-	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/asynctest"]
 	path = vendor/asynctest
 	url = https://github.com/status-im/asynctest.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-presto"]
 	path = vendor/nim-presto
 	url = https://github.com/status-im/nim-presto.git
@@ -132,7 +127,7 @@
 	path = vendor/nim-websock
 	url = https://github.com/status-im/nim-websock.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-contract-abi"]
 	path = vendor/nim-contract-abi
 	url = https://github.com/status-im/nim-contract-abi
@@ -160,13 +155,13 @@
 	path = vendor/nim-taskpools
 	url = https://github.com/status-im/nim-taskpools.git
 	ignore = untracked
-	branch = master
+	branch = stable
 [submodule "vendor/nim-leopard"]
 	path = vendor/nim-leopard
 	url = https://github.com/status-im/nim-leopard.git
-[submodule "vendor/nim-codex-dht"]
-	path = vendor/nim-codex-dht
-	url = https://github.com/codex-storage/nim-codex-dht.git
+[submodule "vendor/logos-storage-nim-dht"]
+	path = vendor/logos-storage-nim-dht
+	url = https://github.com/logos-storage/logos-storage-nim-dht.git
 	ignore = untracked
 	branch = master
 [submodule "vendor/nim-datastore"]
@@ -178,9 +173,11 @@
 [submodule "vendor/nim-eth"]
 	path = vendor/nim-eth
 	url = https://github.com/status-im/nim-eth
-[submodule "vendor/codex-contracts-eth"]
-	path = vendor/codex-contracts-eth
-	url = https://github.com/status-im/codex-contracts-eth
+[submodule "vendor/logos-storage-contracts-eth"]
+	path = vendor/logos-storage-contracts-eth
+	url = https://github.com/logos-storage/logos-storage-contracts-eth.git
+	ignore = untracked
+	branch = master
 [submodule "vendor/nim-protobuf-serialization"]
 	path = vendor/nim-protobuf-serialization
 	url = https://github.com/status-im/nim-protobuf-serialization
@@ -195,26 +192,41 @@
 	url = https://github.com/zevv/npeg
 [submodule "vendor/nim-poseidon2"]
 	path = vendor/nim-poseidon2
-	url = https://github.com/codex-storage/nim-poseidon2.git
+	url = https://github.com/logos-storage/nim-poseidon2.git
 	ignore = untracked
 	branch = master
 [submodule "vendor/constantine"]
 	path = vendor/constantine
 	url = https://github.com/mratsim/constantine.git
 [submodule "vendor/nim-circom-compat"]
 	path = vendor/nim-circom-compat
-	url = https://github.com/codex-storage/nim-circom-compat.git
+	url = https://github.com/logos-storage/nim-circom-compat.git
 	ignore = untracked
 	branch = master
-[submodule "vendor/codex-storage-proofs-circuits"]
-	path = vendor/codex-storage-proofs-circuits
-	url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
+[submodule "vendor/logos-storage-proofs-circuits"]
+	path = vendor/logos-storage-proofs-circuits
+	url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
 	ignore = untracked
 	branch = master
 [submodule "vendor/nim-serde"]
 	path = vendor/nim-serde
-	url = https://github.com/codex-storage/nim-serde.git
+	url = https://github.com/logos-storage/nim-serde.git
 [submodule "vendor/nim-leveldbstatic"]
 	path = vendor/nim-leveldbstatic
-	url = https://github.com/codex-storage/nim-leveldb.git
+	url = https://github.com/logos-storage/nim-leveldb.git
 [submodule "vendor/nim-zippy"]
 	path = vendor/nim-zippy
 	url = https://github.com/status-im/nim-zippy.git
 [submodule "vendor/nph"]
 	path = vendor/nph
 	url = https://github.com/arnetheduck/nph.git
+[submodule "vendor/nim-quic"]
+	path = vendor/nim-quic
+	url = https://github.com/vacp2p/nim-quic.git
+	ignore = untracked
+	branch = main
+[submodule "vendor/nim-ngtcp2"]
+	path = vendor/nim-ngtcp2
+	url = https://github.com/vacp2p/nim-ngtcp2.git
+	ignore = untracked
+	branch = main
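Because this change repoints several submodule URLs and branches, an existing working copy has to resynchronize its remotes before the vendored modules will fetch from the new locations; a sketch using standard git commands:

```bash
# Propagate the new URLs from .gitmodules into .git/config,
# then re-fetch every submodule at its pinned revision.
git submodule sync --recursive
git submodule update --init --recursive
```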
Jenkinsfile (new file, 37 lines, vendored)

@@ -0,0 +1,37 @@
+#!/usr/bin/env groovy
+library 'status-jenkins-lib@v1.9.13'
+
+pipeline {
+  agent { label 'linux && x86_64 && nix-2.24' }
+
+  options {
+    disableConcurrentBuilds()
+    /* manage how many builds we keep */
+    buildDiscarder(logRotator(
+      numToKeepStr: '20',
+      daysToKeepStr: '30',
+    ))
+  }
+
+  stages {
+    stage('Build') {
+      steps {
+        script {
+          nix.flake("default")
+        }
+      }
+    }
+
+    stage('Check') {
+      steps {
+        script {
+          sh './result/bin/storage --version'
+        }
+      }
+    }
+  }
+
+  post {
+    cleanup { cleanWs() }
+  }
+}
Makefile (112 changed lines)

@@ -15,7 +15,7 @@
 #
 # If NIM_COMMIT is set to "nimbusbuild", this will use the
 # version pinned by nimbus-build-system.
-PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
+PINNED_NIM_VERSION := v2.2.4

 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
@@ -40,6 +40,30 @@ DOCKER_IMAGE_NIM_PARAMS ?= -d:chronicles_colors:none -d:insecure

 LINK_PCRE := 0

+ifeq ($(OS),Windows_NT)
+ifeq ($(PROCESSOR_ARCHITECTURE), AMD64)
+ARCH = x86_64
+endif
+ifeq ($(PROCESSOR_ARCHITECTURE), ARM64)
+ARCH = arm64
+endif
+else
+UNAME_P := $(shell uname -m)
+ifneq ($(filter $(UNAME_P), i686 i386 x86_64),)
+ARCH = x86_64
+endif
+ifneq ($(filter $(UNAME_P), aarch64 arm),)
+ARCH = arm64
+endif
+endif
+
+ifeq ($(ARCH), x86_64)
+CXXFLAGS ?= -std=c++17 -mssse3
+else
+CXXFLAGS ?= -std=c++17
+endif
+export CXXFLAGS
+
 # we don't want an error here, so we can handle things later, in the ".DEFAULT" target
 -include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk

@@ -69,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file

 # default target, because it's the first one that doesn't start with '.'

-# Builds the codex binary
+# Builds the Logos Storage binary
 all: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
+		$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims

 # Build tools/cirdl
 cirdl: | deps
@@ -114,12 +138,12 @@ test: | build deps
 # Builds and runs the smart contract tests
 testContracts: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims
+		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims

 # Builds and runs the integration tests
 testIntegration: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims
+		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims

 # Builds and runs all tests (except for Taiko L2 tests)
 testAll: | build deps
@@ -154,11 +178,11 @@ coverage:
 	$(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test
 	cd nimcache/release/testCodex && rm -f *.c
 	mkdir -p coverage
-	lcov --capture --directory nimcache/release/testCodex --output-file coverage/coverage.info
+	lcov --capture --keep-going --directory nimcache/release/testCodex --output-file coverage/coverage.info
 	shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim
-	shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
+	shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
 	echo -e $(BUILD_MSG) "coverage/report/index.html"
-	genhtml coverage/coverage.f.info --output-directory coverage/report
+	genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report

 show-coverage:
 	if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
@@ -175,4 +199,76 @@ ifneq ($(USE_LIBBACKTRACE), 0)
 	+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
 endif

+############
+## Format ##
+############
+.PHONY: build-nph install-nph-hook clean-nph print-nph-path
+
+# Default location for nph binary shall be next to nim binary to make it available on the path.
+NPH:=$(shell dirname $(NIM_BINARY))/nph
+
+build-nph:
+ifeq ("$(wildcard $(NPH))","")
+	$(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \
+		mv vendor/nph/src/nph $(shell dirname $(NPH))
+	echo "nph utility is available at " $(NPH)
+endif
+
+GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit
+
+install-nph-hook: build-nph
+ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","")
+	cp ./tools/scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK)
+else
+	echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT override"
+	exit 1
+endif
+
+nph/%: build-nph
+	echo -e $(FORMAT_MSG) "nph/$*" && \
+		$(NPH) $*
+
+format:
+	$(NPH) *.nim
+	$(NPH) codex/
+	$(NPH) tests/
+	$(NPH) library/
+
+clean-nph:
+	rm -f $(NPH)
+
+# To avoid hardcoding nph binary location in several places
+print-nph-path:
+	echo "$(NPH)"
+
+clean: | clean-nph
+
+################
+## C Bindings ##
+################
+.PHONY: libstorage
+
+STATIC ?= 0
+
+ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
+NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
+endif
+
+libstorage:
+	$(MAKE) deps
+	rm -f build/libstorage*
+ifeq ($(STATIC), 1)
+	echo -e $(BUILD_MSG) "build/$@.a" && \
+		$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+else ifeq ($(detected_OS),Windows)
+	echo -e $(BUILD_MSG) "build/$@.dll" && \
+		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+else ifeq ($(detected_OS),macOS)
+	echo -e $(BUILD_MSG) "build/$@.dylib" && \
+		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+else
+	echo -e $(BUILD_MSG) "build/$@.so" && \
+		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+endif
 endif # "variables.mk" was not included
README.md (85 changed lines)

@@ -1,22 +1,22 @@
-# Codex Decentralized Durability Engine
+# Logos Storage Decentralized Engine

-> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval.
+> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.

 > WARNING: This project is under active development and is considered pre-alpha.

 [](https://opensource.org/licenses/Apache-2.0)
 [](https://opensource.org/licenses/MIT)
 [](#stability)
-[](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
-[](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
-[](https://codecov.io/gh/codex-storage/nim-codex)
+[](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
+[](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
+[](https://codecov.io/gh/logos-storage/logos-storage-nim)
 [](https://discord.gg/CaJTh24ddQ)


 ## Build and Run

-For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
+For detailed instructions on preparing to build logos-storage-nim see [*Build Logos Storage*](https://docs.codex.storage/learn/build).

 To build the project, clone it and run:

@@ -29,11 +29,12 @@ The executable will be placed under the `build` directory under the project root
 Run the client with:

 ```bash
-build/codex
+build/storage
 ```

 ## Configuration

-It is possible to configure a Codex node in several ways:
+It is possible to configure a Logos Storage node in several ways:
 1. CLI options
 2. Environment variables
 3. Configuration file
@ -44,10 +45,72 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
|
||||
|
||||
## Guides
|
||||
|
||||
To get acquainted with Codex, consider:
|
||||
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
|
||||
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
|
||||
To get acquainted with Logos Storage, consider:
|
||||
* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
|
||||
* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
|
||||
|
||||
## API
|
||||
|
||||
The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage).
|
||||
|
||||
## Bindings
|
||||
|
||||
Logos Storage provides a C API that can be wrapped by other languages. The bindings is located in the `library` folder.
|
||||
Currently, only a Go binding is included.
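
As an illustration of how that C API surface comes about (a sketch only; the real exported symbols live in the `library` folder), a Nim proc becomes callable from Go, or any other language with a C FFI, via `exportc`:

```nim
# Sketch only: `storage_version` is a hypothetical symbol, not part of the
# actual libstorage API. It shows how a Nim proc is exposed through the C ABI
# so a Go (or any C FFI) wrapper can call it by its unmangled name.
proc storage_version(): cstring {.exportc, dynlib.} =
  "0.1.0"
```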

### Build the C library

```bash
make libstorage
```

This produces the shared library under `build/`.

### Run the Go example

Build the Go example:

```bash
go build -o storage-go examples/golang/storage.go
```

Export the library path:

```bash
export LD_LIBRARY_PATH=build
```

Run the example:

```bash
./storage-go
```

### Static vs Dynamic build

By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:

```bash
# Build dynamic (default)
make libstorage

# Build static
make STATIC=1 libstorage
```

### Limitation

Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed.
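
One way to honor this constraint, sketched below in Nim with a hypothetical `onEvent` callback (the real callback signatures come from the generated C header), is to have the callback only enqueue work and return immediately, leaving the slow part to a dedicated worker thread:

```nim
# Minimal sketch, assuming a hypothetical `onEvent` callback shape.
# Compile with threads enabled (the default in recent Nim).
import std/os

var
  events: Channel[string] # work queue drained by a separate worker thread
  worker: Thread[void]

proc workerLoop() {.thread.} =
  while true:
    let msg = events.recv() # blocking here is fine: we are off the callback thread
    if msg == "quit":
      break
    sleep(100) # stand-in for slow processing
    echo "processed: ", msg

proc onEvent(msg: string) =
  events.send(msg) # enqueue and return immediately; never block the caller

events.open()
createThread(worker, workerLoop)
onEvent("block stored")
onEvent("quit")
joinThread(worker)
events.close()
```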

## Contributing and development

Feel free to dive in, contributions are welcome! Open an issue or submit PRs.

### Linting and formatting

`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) for formatting its code, and contributions are required to adhere to its styling.
On a fresh setup, run `make build-nph` to get `nph`.
To format files, run `make nph/<file or folder you want to format>`.
You can optionally install a Git pre-commit hook with `make install-nph-hook`, which formats modified files before they are committed.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e. the `nim.formatOnSave` property) to format files using `nph`.
@@ -10,17 +10,17 @@ nim c -r run_benchmarks
```

By default, circuit files for each combination of circuit args are generated in a unique folder named like:
    nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
    logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3

Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.

You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs; see `create_circuits.nim` for their definition.
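
For example, a customized run might look like the sketch below. The field names are inferred from the bench folder naming above and are assumptions; treat `create_circuits.nim` as the source of truth for the actual fields and the `createCircuit` signature.

```nim
# Hedged sketch: field names mirror the bench folder name
# (depth, maxslots, cellsize, blocksize, nsamples, ...); check
# create_circuits.nim for the authoritative definitions.
import ./create_circuits

proc runCustomBench() =
  let env = CircuitEnv.default() # repo-relative paths, ptau file, circuit CLI
  let args = CircuitArgs(
    depth: 32, # Merkle tree depth
    maxslots: 256, # maximum slots per dataset
    cellsize: 2048, # bytes per cell
    blocksize: 65536, # bytes per block
    nsamples: 9, # cells sampled per proof
    entropy: 1234567,
    seed: 12345,
    nslots: 11,
    ncells: 512,
    index: 3,
  )
  discard createCircuit(args, env) # generates or reuses cached circuit files
```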

The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.

## Codex Ark Circom CLI
## Logos Storage Ark Circom CLI

Runs Codex's prover setup with Ark / Circom.
Runs Logos Storage's prover setup with Ark / Circom.

Compile:
```sh

@@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
  let codexDir = findCodexProjectDir()
  result.nimCircuitCli =
    codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
    codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
    "proof_input" / "cli"
  result.circuitDirIncludes =
    codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
    codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
  result.ptauPath =
    codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
  result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
@@ -118,7 +118,7 @@ proc createCircuit*(
  ##
  ## All needed circuit files will be generated as needed.
  ## They will be located in `circBenchDir` which defaults to a folder like:
  ## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
  ## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
  ## with all the given CircuitArgs.
  ##
  let circdir = circBenchDir

@@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
  )
  benchRuns[benchmarkName] = (runs.avg(), count)

template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
  if printRegular:
    echo ""
    for k, v in benchRuns:
      echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k

  if printTsv:
    echo ""
    echo "name", "\t", "avgTimeSec", "\t", "count"
    for k, v in benchRuns:
      echo k, "\t", v.avgTimeSec, "\t", v.count

import std/math

func floorLog2*(x: int): int =
build.nims | 112
@@ -3,63 +3,97 @@ mode = ScriptMode.Verbose
import std/os except commandLineParams

### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
  if not dirExists "build":
    mkDir "build"

  # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
  var extra_params = params
  when compiles(commandLineParams):
    for param in commandLineParams():
      extra_params &= " " & param
  else:
    for i in 2..<paramCount():
    for i in 2 ..< paramCount():
      extra_params &= " " & paramStr(i)

  let
    # Place build output in 'build' folder, even if name includes a longer path.
    outName = os.lastPathPart(name)
    cmd = "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & name & ".nim"
    cmd =
      "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
      srcName & ".nim"

  exec(cmd)

proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
  buildBinary name, srcDir, params
  exec "build/" & name
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
  if not dirExists "build":
    mkDir "build"

task codex, "build codex binary":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
  if `type` == "dynamic":
    let lib_name = (
      when defined(windows): name & ".dll"
      elif defined(macosx): name & ".dylib"
      else: name & ".so"
    )
    exec "nim c" & " --out:build/" & lib_name &
      " --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
      "--nimMainPrefix:libstorage -d:noSignalHandler " &
      "-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
      "-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
  else:
    exec "nim c" & " --out:build/" & name &
      ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
      "--nimMainPrefix:libstorage -d:noSignalHandler " &
      "-d:LeopardExtraCompilerFlags=-fPIC " &
      "-d:chronicles_runtime_filtering " &
      "-d:chronicles_log_level=TRACE " &
      params & " " & srcDir & name & ".nim"

proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
  buildBinary name, outName, srcDir, params
  exec "build/" & outName

task storage, "build logos storage binary":
  buildBinary "codex",
    outname = "storage",
    params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"

task toolsCirdl, "build tools/cirdl binary":
  buildBinary "tools/cirdl/cirdl"

task testCodex, "Build & run Codex tests":
  test "testCodex", params = "-d:codex_enable_proof_failures=true"
task testStorage, "Build & run Logos Storage tests":
  test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"

task testContracts, "Build & run Codex Contract tests":
task testContracts, "Build & run Logos Storage Contract tests":
  test "testContracts"

task testIntegration, "Run integration tests":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
  buildBinary "codex",
    outName = "storage",
    params =
      "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
  test "testIntegration"
  # use params to enable logging from the integration test executable
  # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
  #   "-d:chronicles_enabled_topics:integration:TRACE"

task build, "build codex binary":
  codexTask()
task build, "build Logos Storage binary":
  storageTask()

task test, "Run tests":
  testCodexTask()
  testStorageTask()

task testTools, "Run Tools tests":
  toolsCirdlTask()
  test "testTools"

task testAll, "Run all tests (except for Taiko L2 tests)":
  testCodexTask()
  testStorageTask()
  testContractsTask()
  testIntegrationTask()
  testToolsTask()

task testTaiko, "Run Taiko L2 tests":
  codexTask()
  storageTask()
  test "testTaiko"

import strutils
@@ -85,20 +119,50 @@ task coverage, "generates code coverage report":

  var nimSrcs = " "
  for f in walkDirRec("codex", {pcFile}):
    if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
    if f.endswith(".nim"):
      nimSrcs.add " " & f.absolutePath.quoteShell()

  echo "======== Running Tests ======== "
  test "coverage", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
  test "coverage",
    srcDir = "tests/",
    params =
      " --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
  exec("rm nimcache/coverage/*.c")
  rmDir("coverage"); mkDir("coverage")
  rmDir("coverage")
  mkDir("coverage")
  echo " ======== Running LCOV ======== "
  exec("lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info")
  exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
  exec(
    "lcov --capture --keep-going --directory nimcache/coverage --output-file coverage/coverage.info"
  )
  exec(
    "lcov --extract coverage/coverage.info --keep-going --output-file coverage/coverage.f.info " &
    nimSrcs
  )
  echo " ======== Generating HTML coverage report ======== "
  exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
  exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ")
  echo " ======== Coverage report Done ======== "

task showCoverage, "open coverage html":
  echo " ======== Opening HTML coverage report in browser... ======== "
  if findExe("open") != "":
    exec("open coverage/report/index.html")

task libstorageDynamic, "Generate bindings":
  var params = ""
  when compiles(commandLineParams):
    for param in commandLineParams():
      if param.len > 0 and param.startsWith("-"):
        params.add " " & param

  let name = "libstorage"
  buildLibrary name, "library/", params, "dynamic"

task libstorageStatic, "Generate bindings":
  var params = ""
  when compiles(commandLineParams):
    for param in commandLineParams():
      if param.len > 0 and param.startsWith("-"):
        params.add " " & param

  let name = "libstorage"
  buildLibrary name, "library/", params, "static"
codex.nim | 67
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -28,7 +28,6 @@ import ./codex/codextypes
export codex, conf, libp2p, chronos, logutils

when isMainModule:
  import std/sequtils
  import std/os
  import pkg/confutils/defs
  import ./codex/utils/fileutils
@@ -39,40 +38,45 @@ when isMainModule:
  when defined(posix):
    import system/ansi_c

  type
    CodexStatus {.pure.} = enum
      Stopped,
      Stopping,
      Running
  type CodexStatus {.pure.} = enum
    Stopped
    Stopping
    Running

  let config = CodexConf.load(
    version = codexFullVersion,
    envVarsPrefix = "codex",
    secondarySources = proc (config: CodexConf, sources: auto) =
      if configFile =? config.configFile:
        sources.addConfigFile(Toml, configFile)
    envVarsPrefix = "storage",
    secondarySources = proc(
        config: CodexConf, sources: auto
    ) {.gcsafe, raises: [ConfigurationError].} =
      if configFile =? config.configFile:
        sources.addConfigFile(Toml, configFile)
    ,
  )
  config.setupLogging()
  config.setupMetrics()

  if config.nat == ValidIpAddress.init(IPv4_any()):
    error "`--nat` cannot be set to the any (`0.0.0.0`) address"
  try:
    updateLogLevel(config.logLevel)
  except ValueError as err:
    try:
      stderr.write "Invalid value for --log-level. " & err.msg & "\n"
    except IOError:
      echo "Invalid value for --log-level. " & err.msg
    quit QuitFailure

  if config.nat == ValidIpAddress.init("127.0.0.1"):
    warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
  config.setupMetrics()

  if not(checkAndCreateDataDir((config.dataDir).string)):
  if not (checkAndCreateDataDir((config.dataDir).string)):
    # We are unable to access/create data folder or data folder's
    # permissions are insecure.
    quit QuitFailure

  if config.prover() and not(checkAndCreateDataDir((config.circuitDir).string)):
  if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)):
    quit QuitFailure

  trace "Data dir initialized", dir = $config.dataDir

  if not(checkAndCreateDataDir((config.dataDir / "repo"))):
  if not (checkAndCreateDataDir((config.dataDir / "repo"))):
    # We are unable to access/create data folder or data folder's
    # permissions are insecure.
    quit QuitFailure
@@ -91,25 +95,28 @@ when isMainModule:
      config.dataDir / config.netPrivKeyFile

    privateKey = setupKey(keyPath).expect("Should setup private key!")
    server = try:
      CodexServer.new(config, privateKey)
    except Exception as exc:
      error "Failed to start Codex", msg = exc.msg
      quit QuitFailure
    server =
      try:
        CodexServer.new(config, privateKey)
      except Exception as exc:
        error "Failed to start Logos Storage", msg = exc.msg
        quit QuitFailure

  ## Ctrl+C handling
  proc doShutdown() =
    shutdown = server.stop()
    shutdown = server.shutdown()
    state = CodexStatus.Stopping

    notice "Stopping Codex"
    notice "Stopping Logos Storage"

  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      try:
        setupForeignThreadGc()
      except Exception as exc: raiseAssert exc.msg # shouldn't happen
      except Exception as exc:
        raiseAssert exc.msg
        # shouldn't happen
    notice "Shutting down after having received SIGINT"

    doShutdown()
@@ -131,7 +138,7 @@ when isMainModule:
  try:
    waitFor server.start()
  except CatchableError as error:
    error "Codex failed to start", error = error.msg
    error "Logos Storage failed to start", error = error.msg
    # XXX ideally we'd like to issue a stop instead of quitting cold turkey,
    # but this would mean we'd have to fix the implementation of all
    # services so they won't crash if we attempt to stop them before they
@@ -152,7 +159,7 @@ when isMainModule:
    # be assigned before state switches to Stopping
    waitFor shutdown
  except CatchableError as error:
    error "Codex didn't shutdown correctly", error = error.msg
    error "Logos Storage didn't shutdown correctly", error = error.msg
    quit QuitFailure

  notice "Exited codex"
  notice "Exited Storage"

@@ -1,5 +1,5 @@
version = "0.1.0"
author = "Codex Team"
author = "Logos Storage Team"
description = "p2p data durability engine"
license = "MIT"
binDir = "build"

@@ -1,10 +1,5 @@
import ./blockexchange/[
  network,
  engine,
  peers]
import ./blockexchange/[network, engine, peers]

import ./blockexchange/protobuf/[
  blockexc,
  presence]
import ./blockexchange/protobuf/[blockexc, presence]

export network, engine, blockexc, presence, peers
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
@@ -18,6 +20,8 @@ import ../protobuf/presence
import ../peers

import ../../utils
import ../../utils/exceptions
import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils
@@ -26,114 +30,122 @@ import ../../manifest
logScope:
  topics = "codex discoveryengine advertiser"

declareGauge(codexInflightAdvertise, "inflight advertise requests")
declareGauge(codex_inflight_advertise, "inflight advertise requests")

const
  DefaultConcurrentAdvertRequests = 10
  DefaultAdvertiseLoopSleep = 30.minutes

type
  Advertiser* = ref object of RootObj
    localStore*: BlockStore # Local block store for this instance
    discovery*: Discovery # Discovery interface
type Advertiser* = ref object of RootObj
  localStore*: BlockStore # Local block store for this instance
  discovery*: Discovery # Discovery interface

    advertiserRunning*: bool # Indicates if discovery is running
    concurrentAdvReqs: int # Concurrent advertise requests
  advertiserRunning*: bool # Indicates if discovery is running
  concurrentAdvReqs: int # Concurrent advertise requests

    advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
    advertiseQueue*: AsyncQueue[Cid] # Advertise queue
    advertiseTasks*: seq[Future[void]] # Advertise tasks
  advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
  advertiseQueue*: AsyncQueue[Cid] # Advertise queue
  trackedFutures*: TrackedFutures # Advertise tasks futures

    advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
    inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
  advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
  inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests

proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
  if cid notin b.advertiseQueue:
    await b.advertiseQueue.put(cid)

    trace "Advertising", cid

proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
  without isM =? cid.isManifest, err:
    warn "Unable to determine if cid is manifest"
    return

  if isM:
    without blk =? await b.localStore.getBlock(cid), err:
      error "Error retrieving manifest block", cid, err = err.msg
      return
  try:
    if isM:
      without blk =? await b.localStore.getBlock(cid), err:
        error "Error retrieving manifest block", cid, err = err.msg
        return

      without manifest =? Manifest.decode(blk), err:
        error "Unable to decode as manifest", err = err.msg
        return
      without manifest =? Manifest.decode(blk), err:
        error "Unable to decode as manifest", err = err.msg
        return

      # announce manifest cid and tree cid
      await b.addCidToQueue(cid)
      await b.addCidToQueue(manifest.treeCid)
      # announce manifest cid and tree cid
      await b.addCidToQueue(cid)
      await b.addCidToQueue(manifest.treeCid)
  except CancelledError as exc:
    trace "Cancelled advertise block", cid
    raise exc
  except CatchableError as e:
    error "failed to advertise block", cid, error = e.msgDetail

proc advertiseLocalStoreLoop(b: Advertiser) {.async.} =
  while b.advertiserRunning:
    if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
      trace "Advertiser begins iterating blocks..."
      for c in cids:
        if cid =? await c:
          await b.advertiseBlock(cid)
      trace "Advertiser iterating blocks finished."
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
  try:
    while b.advertiserRunning:
      if cidsIter =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
        trace "Advertiser begins iterating blocks..."
        for c in cidsIter:
          if cid =? await c:
            await b.advertiseBlock(cid)
        trace "Advertiser iterating blocks finished."

    await sleepAsync(b.advertiseLocalStoreLoopSleep)
      await sleepAsync(b.advertiseLocalStoreLoopSleep)
  except CancelledError:
    warn "Cancelled advertise local store loop"

  info "Exiting advertise task loop"

proc processQueueLoop(b: Advertiser) {.async.} =
  while b.advertiserRunning:
    try:
      let
        cid = await b.advertiseQueue.get()
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
  try:
    while b.advertiserRunning:
      let cid = await b.advertiseQueue.get()

      if cid in b.inFlightAdvReqs:
        continue

      try:
        let
          request = b.discovery.provide(cid)
        let request = b.discovery.provide(cid)
        b.inFlightAdvReqs[cid] = request
        codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)

        b.inFlightAdvReqs[cid] = request
        codexInflightAdvertise.set(b.inFlightAdvReqs.len.int64)
        await request

      finally:
      defer:
        b.inFlightAdvReqs.del(cid)
        codexInflightAdvertise.set(b.inFlightAdvReqs.len.int64)
    except CancelledError:
      trace "Advertise task cancelled"
      return
    except CatchableError as exc:
      warn "Exception in advertise task runner", exc = exc.msg
        codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)

      await request
  except CancelledError:
    warn "Cancelled advertise task runner"

  info "Exiting advertise task runner"

proc start*(b: Advertiser) {.async.} =
proc start*(b: Advertiser) {.async: (raises: []).} =
  ## Start the advertiser
  ##

  trace "Advertiser start"

  proc onBlock(cid: Cid) {.async.} =
    await b.advertiseBlock(cid)
  # The advertiser is expected to be started only once.
  if b.advertiserRunning:
    raiseAssert "Advertiser can only be started once — this should not happen"

  proc onBlock(cid: Cid) {.async: (raises: []).} =
    try:
      await b.advertiseBlock(cid)
    except CancelledError:
      trace "Cancelled advertise block", cid

  doAssert(b.localStore.onBlockStored.isNone())
  b.localStore.onBlockStored = onBlock.some

  if b.advertiserRunning:
    warn "Starting advertiser twice"
    return

  b.advertiserRunning = true
  for i in 0..<b.concurrentAdvReqs:
    b.advertiseTasks.add(processQueueLoop(b))
  for i in 0 ..< b.concurrentAdvReqs:
    let fut = b.processQueueLoop()
    b.trackedFutures.track(fut)

  b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
  b.trackedFutures.track(b.advertiseLocalStoreLoop)

proc stop*(b: Advertiser) {.async.} =
proc stop*(b: Advertiser) {.async: (raises: []).} =
  ## Stop the advertiser
  ##

@@ -145,26 +157,16 @@ proc stop*(b: Advertiser) {.async.} =
  b.advertiserRunning = false
  # Stop incoming tasks from callback and localStore loop
  b.localStore.onBlockStored = CidCallback.none
  if not b.advertiseLocalStoreLoop.isNil and not b.advertiseLocalStoreLoop.finished:
    trace "Awaiting advertise loop to stop"
    await b.advertiseLocalStoreLoop.cancelAndWait()
    trace "Advertise loop stopped"

  # Clear up remaining tasks
  for task in b.advertiseTasks:
    if not task.finished:
      trace "Awaiting advertise task to stop"
      await task.cancelAndWait()
      trace "Advertise task stopped"

  trace "Advertiser stopped"
  trace "Stopping advertise loop and tasks"
  await b.trackedFutures.cancelTracked()
  trace "Advertiser loop and tasks stopped"

proc new*(
    T: type Advertiser,
    localStore: BlockStore,
    discovery: Discovery,
    concurrentAdvReqs = DefaultConcurrentAdvertRequests,
    advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep
    advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep,
): Advertiser =
  ## Create a advertiser instance
  ##
@@ -173,5 +175,7 @@ proc new*(
    discovery: discovery,
    concurrentAdvReqs: concurrentAdvReqs,
    advertiseQueue: newAsyncQueue[Cid](concurrentAdvReqs),
    trackedFutures: TrackedFutures.new(),
    inFlightAdvReqs: initTable[Cid, Future[void]](),
    advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep)
    advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep,
  )
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -8,6 +8,7 @@
## those terms.

import std/sequtils
import std/algorithm

import pkg/chronos
import pkg/libp2p/cid
@@ -23,6 +24,7 @@ import ../network
import ../peers

import ../../utils
import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils
@@ -31,95 +33,107 @@ import ../../manifest
logScope:
  topics = "codex discoveryengine"

declareGauge(codexInflightDiscovery, "inflight discovery requests")
declareGauge(codex_inflight_discovery, "inflight discovery requests")

const
  DefaultConcurrentDiscRequests = 10
  DefaultDiscoveryTimeout = 1.minutes
  DefaultMinPeersPerBlock = 3
  DefaultMaxPeersPerBlock = 8
  DefaultDiscoveryLoopSleep = 3.seconds

type
  DiscoveryEngine* = ref object of RootObj
    localStore*: BlockStore # Local block store for this instance
    peers*: PeerCtxStore # Peer context store
    network*: BlockExcNetwork # Network interface
    discovery*: Discovery # Discovery interface
    pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
    discEngineRunning*: bool # Indicates if discovery is running
    concurrentDiscReqs: int # Concurrent discovery requests
    discoveryLoop*: Future[void] # Discovery loop task handle
    discoveryQueue*: AsyncQueue[Cid] # Discovery queue
    discoveryTasks*: seq[Future[void]] # Discovery tasks
    minPeersPerBlock*: int # Max number of peers with block
    discoveryLoopSleep: Duration # Discovery loop sleep
    inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
type DiscoveryEngine* = ref object of RootObj
  localStore*: BlockStore # Local block store for this instance
  peers*: PeerCtxStore # Peer context store
  network*: BlockExcNetwork # Network interface
  discovery*: Discovery # Discovery interface
  pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
  discEngineRunning*: bool # Indicates if discovery is running
  concurrentDiscReqs: int # Concurrent discovery requests
  discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
  discoveryQueue*: AsyncQueue[Cid] # Discovery queue
  trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
  minPeersPerBlock*: int # Min number of peers with block
  maxPeersPerBlock*: int # Max number of peers with block
  discoveryLoopSleep: Duration # Discovery loop sleep
  inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
    # Inflight discovery requests

proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
  while b.discEngineRunning:
    for cid in toSeq(b.pendingBlocks.wantListBlockCids):
      try:
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
  var haves = b.peers.peersHave(cid)
  let count = haves.len - b.maxPeersPerBlock
  if count <= 0:
    return

  haves.sort(
    proc(a, b: BlockExcPeerCtx): int =
      cmp(a.lastExchange, b.lastExchange)
  )

  let toRemove = haves[0 ..< count]
  for peer in toRemove:
    try:
      peer.cleanPresence(BlockAddress.init(cid))
      trace "Removed block presence from peer", cid, peer = peer.id
    except CatchableError as exc:
      error "Failed to clean presence for peer",
        cid, peer = peer.id, error = exc.msg, name = exc.name

proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
  try:
    while b.discEngineRunning:
      for cid in toSeq(b.pendingBlocks.wantListBlockCids):
        await b.discoveryQueue.put(cid)
      except CancelledError:
        trace "Discovery loop cancelled"
        return
      except CatchableError as exc:
        warn "Exception in discovery loop", exc = exc.msg

    logScope:
      sleep = b.discoveryLoopSleep
      wanted = b.pendingBlocks.len
      await sleepAsync(b.discoveryLoopSleep)
  except CancelledError:
    trace "Discovery loop cancelled"

    await sleepAsync(b.discoveryLoopSleep)

proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
  ## Run discovery tasks
  ##

  while b.discEngineRunning:
    try:
      let
        cid = await b.discoveryQueue.get()
  try:
    while b.discEngineRunning:
      let cid = await b.discoveryQueue.get()

      if cid in b.inFlightDiscReqs:
        trace "Discovery request already in progress", cid
        continue

      let
        haves = b.peers.peersHave(cid)
      trace "Running discovery task for cid", cid

      let haves = b.peers.peersHave(cid)

      if haves.len > b.maxPeersPerBlock:
        trace "Cleaning up excess peers",
          cid, peers = haves.len, max = b.maxPeersPerBlock
        b.cleanupExcessPeers(cid)
        continue

      if haves.len < b.minPeersPerBlock:
        try:
          let
            request = b.discovery
              .find(cid)
              .wait(DefaultDiscoveryTimeout)
          let request = b.discovery.find(cid)
          b.inFlightDiscReqs[cid] = request
          codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)

          b.inFlightDiscReqs[cid] = request
          codexInflightDiscovery.set(b.inFlightDiscReqs.len.int64)
          let
            peers = await request
          defer:
            b.inFlightDiscReqs.del(cid)
            codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)

          let
            dialed = await allFinished(
              peers.mapIt( b.network.dialPeer(it.data) ))
          if (await request.withTimeout(DefaultDiscoveryTimeout)) and
              peers =? (await request).catch:
            let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))

            for i, f in dialed:
              if f.failed:
                await b.discovery.removeProvider(peers[i].data.peerId)

        finally:
          b.inFlightDiscReqs.del(cid)
          codexInflightDiscovery.set(b.inFlightDiscReqs.len.int64)
        except CancelledError:
          trace "Discovery task cancelled"
          return
        except CatchableError as exc:
          warn "Exception in discovery task runner", exc = exc.msg
  except CancelledError:
    trace "Discovery task cancelled"
    return

  info "Exiting discovery task runner"

proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
  for cid in cids:
    if cid notin b.discoveryQueue:
      try:
@@ -127,23 +141,27 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
      except CatchableError as exc:
        warn "Exception queueing discovery request", exc = exc.msg

proc start*(b: DiscoveryEngine) {.async.} =
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
  ## Start the discengine task
  ##

  trace "Discovery engine start"
  trace "Discovery engine starting"

  if b.discEngineRunning:
    warn "Starting discovery engine twice"
    return

  b.discEngineRunning = true
  for i in 0..<b.concurrentDiscReqs:
    b.discoveryTasks.add(discoveryTaskLoop(b))
  for i in 0 ..< b.concurrentDiscReqs:
    let fut = b.discoveryTaskLoop()
    b.trackedFutures.track(fut)

  b.discoveryLoop = discoveryQueueLoop(b)
  b.discoveryLoop = b.discoveryQueueLoop()
  b.trackedFutures.track(b.discoveryLoop)

proc stop*(b: DiscoveryEngine) {.async.} =
  trace "Discovery engine started"

proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
  ## Stop the discovery engine
  ##

@@ -153,16 +171,9 @@ proc stop*(b: DiscoveryEngine) {.async.} =
    return

  b.discEngineRunning = false
  for task in b.discoveryTasks:
    if not task.finished:
      trace "Awaiting discovery task to stop"
      await task.cancelAndWait()
      trace "Discovery task stopped"

  if not b.discoveryLoop.isNil and not b.discoveryLoop.finished:
    trace "Awaiting discovery loop to stop"
    await b.discoveryLoop.cancelAndWait()
    trace "Discovery loop stopped"
  trace "Stopping discovery loop and tasks"
  await b.trackedFutures.cancelTracked()
  trace "Discovery loop and tasks stopped"

  trace "Discovery engine stopped"

@@ -175,7 +186,8 @@ proc new*(
    pendingBlocks: PendingBlocksManager,
    concurrentDiscReqs = DefaultConcurrentDiscRequests,
    discoveryLoopSleep = DefaultDiscoveryLoopSleep,
    minPeersPerBlock = DefaultMinPeersPerBlock
    minPeersPerBlock = DefaultMinPeersPerBlock,
    maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
  ## Create a discovery engine instance for advertising services
  ##
@@ -187,6 +199,9 @@ proc new*(
    pendingBlocks: pendingBlocks,
    concurrentDiscReqs: concurrentDiscReqs,
    discoveryQueue: newAsyncQueue[Cid](concurrentDiscReqs),
    trackedFutures: TrackedFutures.new(),
    inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
    discoveryLoopSleep: discoveryLoopSleep,
    minPeersPerBlock: minPeersPerBlock)
    minPeersPerBlock: minPeersPerBlock,
    maxPeersPerBlock: maxPeersPerBlock,
  )
File diff suppressed because it is too large.
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/math
import pkg/nitro
import pkg/questionable/results
@@ -15,15 +17,13 @@ import ../peers
export nitro
export results

push: {.upraises: [].}

const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64^18).u256 # 1 asset, ERC20 default is 18 decimals
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals

func openLedgerChannel*(wallet: WalletRef,
                        hub: EthAddress,
                        asset: EthAddress): ?!ChannelId =
func openLedgerChannel*(
    wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
  wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)

func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
@@ -36,9 +36,7 @@ func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
  else:
    failure "no account set for peer"

func pay*(wallet: WalletRef,
          peer: BlockExcPeerCtx,
          amount: UInt256): ?!SignedState =
func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
  if account =? peer.account:
    let asset = Asset
    let receiver = account.address
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,12 +7,11 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/tables
import std/monotimes

import pkg/upraises

push: {.upraises: [].}
import std/strutils

import pkg/chronos
import pkg/libp2p
@@ -25,133 +24,194 @@ import ../../logutils
logScope:
  topics = "codex pendingblocks"

declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests")
declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us")
declareGauge(
  codex_block_exchange_pending_block_requests,
  "codex blockexchange pending block requests",
)
declareGauge(
  codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us"
)

const
  DefaultBlockTimeout* = 10.minutes
  DefaultBlockRetries* = 3000
  DefaultRetryInterval* = 2.seconds

type
  RetriesExhaustedError* = object of CatchableError
  BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError])

  BlockReq* = object
    handle*: Future[Block]
    inFlight*: bool
    handle*: BlockHandle
    requested*: ?PeerId
    blockRetries*: int
    startTime*: int64

  PendingBlocksManager* = ref object of RootObj
    blockRetries*: int = DefaultBlockRetries
    retryInterval*: Duration = DefaultRetryInterval
    blocks*: Table[BlockAddress, BlockReq] # pending Block requests
    lastInclusion*: Moment # time at which we last included a block into our wantlist

proc updatePendingBlockGauge(p: PendingBlocksManager) =
  codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)

proc getWantHandle*(
    p: PendingBlocksManager,
    address: BlockAddress,
    timeout = DefaultBlockTimeout,
    inFlight = false): Future[Block] {.async.} =
    self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
  ## Add an event for a block
  ##

  try:
    if address notin p.blocks:
      p.blocks[address] = BlockReq(
        handle: newFuture[Block]("pendingBlocks.getWantHandle"),
        inFlight: inFlight,
        startTime: getMonoTime().ticks)
  self.blocks.withValue(address, blk):
    return blk[].handle
  do:
    let blk = BlockReq(
      handle: newFuture[Block]("pendingBlocks.getWantHandle"),
      requested: requested,
      blockRetries: self.blockRetries,
      startTime: getMonoTime().ticks,
    )
    self.blocks[address] = blk
    self.lastInclusion = Moment.now()

    p.updatePendingBlockGauge()
    return await p.blocks[address].handle.wait(timeout)
  except CancelledError as exc:
    trace "Blocks cancelled", exc = exc.msg, address
    raise exc
  except CatchableError as exc:
    error "Pending WANT failed or expired", exc = exc.msg
    # no need to cancel, it is already cancelled by wait()
    raise exc
  finally:
    p.blocks.del(address)
    p.updatePendingBlockGauge()
    let handle = blk.handle

    proc cleanUpBlock(data: pointer) {.raises: [].} =
      self.blocks.del(address)
      self.updatePendingBlockGauge()

    handle.addCallback(cleanUpBlock)
    handle.cancelCallback = proc(data: pointer) {.raises: [].} =
      if not handle.finished:
        handle.removeCallback(cleanUpBlock)
        cleanUpBlock(nil)

    self.updatePendingBlockGauge()
    return handle

proc getWantHandle*(
    p: PendingBlocksManager,
    cid: Cid,
    timeout = DefaultBlockTimeout,
    inFlight = false): Future[Block] =
  p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
    self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
  self.getWantHandle(BlockAddress.init(cid), requested)

proc completeWantHandle*(
    self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
  ## Complete a pending want handle
  self.blocks.withValue(address, blockReq):
    if not blockReq[].handle.finished:
      trace "Completing want handle from provided block", address
      blockReq[].handle.complete(blk)
    else:
      trace "Want handle already completed", address
  do:
    trace "No pending want handle found for address", address

proc resolve*(
    p: PendingBlocksManager,
    blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} =
    self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
  ## Resolve pending blocks
  ##

  for bd in blocksDelivery:
    p.blocks.withValue(bd.address, blockReq):
      if not blockReq.handle.finished:
    self.blocks.withValue(bd.address, blockReq):
      if not blockReq[].handle.finished:
        trace "Resolving pending block", address = bd.address
        let
          startTime = blockReq.startTime
          startTime = blockReq[].startTime
          stopTime = getMonoTime().ticks
          retrievalDurationUs = (stopTime - startTime) div 1000

        blockReq.handle.complete(bd.blk)

        codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)

        if retrievalDurationUs > 500000:
          warn "High block retrieval time", retrievalDurationUs, address = bd.address
      else:
        trace "Block handle already finished", address = bd.address

proc setInFlight*(
    p: PendingBlocksManager,
    address: BlockAddress,
    inFlight = true) =
  ## Set inflight status for a block
func retries*(self: PendingBlocksManager, address: BlockAddress): int =
  self.blocks.withValue(address, pending):
    result = pending[].blockRetries
  do:
    result = 0

func decRetries*(self: PendingBlocksManager, address: BlockAddress) =
  self.blocks.withValue(address, pending):
    pending[].blockRetries -= 1

func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool =
  self.blocks.withValue(address, pending):
    result = pending[].blockRetries <= 0

func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## Check if a block has been requested to a peer
  ##
  result = false
  self.blocks.withValue(address, pending):
    result = pending[].requested.isSome

func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
  ## Returns the peer that requested this block
  ##
  result = PeerId.none
  self.blocks.withValue(address, pending):
    result = pending[].requested

proc markRequested*(
    self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
  ## Marks this block as having been requested to a peer
  ##

  p.blocks.withValue(address, pending):
    pending[].inFlight = inFlight
  if self.isRequested(address):
    return false

proc isInFlight*(
    p: PendingBlocksManager,
    address: BlockAddress): bool =
  ## Check if a block is in flight
  ##
  self.blocks.withValue(address, pending):
    pending[].requested = peer.some
    return true

  p.blocks.withValue(address, pending):
    result = pending[].inFlight
proc clearRequest*(
    self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
  self.blocks.withValue(address, pending):
    if peer.isSome:
      assert peer == pending[].requested
    pending[].requested = PeerId.none

proc contains*(p: PendingBlocksManager, cid: Cid): bool =
  BlockAddress.init(cid) in p.blocks
func contains*(self: PendingBlocksManager, cid: Cid): bool =
  BlockAddress.init(cid) in self.blocks

proc contains*(p: PendingBlocksManager, address: BlockAddress): bool =
  address in p.blocks
func contains*(self: PendingBlocksManager, address: BlockAddress): bool =
  address in self.blocks

iterator wantList*(p: PendingBlocksManager): BlockAddress =
  for a in p.blocks.keys:
iterator wantList*(self: PendingBlocksManager): BlockAddress =
  for a in self.blocks.keys:
    yield a

iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
  for a in p.blocks.keys:
iterator wantListBlockCids*(self: PendingBlocksManager): Cid =
  for a in self.blocks.keys:
    if not a.leaf:
      yield a.cid

iterator wantListCids*(p: PendingBlocksManager): Cid =
iterator wantListCids*(self: PendingBlocksManager): Cid =
  var yieldedCids = initHashSet[Cid]()
  for a in p.blocks.keys:
  for a in self.blocks.keys:
    let cid = a.cidOrTreeCid
    if cid notin yieldedCids:
      yieldedCids.incl(cid)
      yield cid

iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
  for v in p.blocks.values:
iterator wantHandles*(self: PendingBlocksManager): Future[Block] =
  for v in self.blocks.values:
    yield v.handle

proc wantListLen*(p: PendingBlocksManager): int =
  p.blocks.len
proc wantListLen*(self: PendingBlocksManager): int =
  self.blocks.len

func len*(p: PendingBlocksManager): int =
  p.blocks.len
func len*(self: PendingBlocksManager): int =
  self.blocks.len

func new*(T: type PendingBlocksManager): PendingBlocksManager =
  PendingBlocksManager()
func new*(
    T: type PendingBlocksManager,
    retries = DefaultBlockRetries,
    interval = DefaultRetryInterval,
): PendingBlocksManager =
  PendingBlocksManager(blockRetries: retries, retryInterval: interval)
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -21,24 +21,28 @@ import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../protobuf/payments
import ../../utils/trackedfutures

import ./networkpeer

export network, payments
export networkpeer, payments

logScope:
  topics = "codex blockexcnetwork"

const
  Codec* = "/codex/blockexc/1.0.0"
  MaxInflight* = 100
  DefaultMaxInflight* = 100

type
  WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
  BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
  BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
  AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
  PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
  WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
  BlocksDeliveryHandler* =
    proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
  BlockPresenceHandler* =
    proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
  AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
  PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
  PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}

  BlockExcHandlers* = object
    onWantList*: WantListHandler
@@ -46,6 +50,9 @@ type
    onPresence*: BlockPresenceHandler
    onAccount*: AccountHandler
    onPayment*: PaymentHandler
    onPeerJoined*: PeerEventHandler
    onPeerDeparted*: PeerEventHandler
    onPeerDropped*: PeerEventHandler

  WantListSender* = proc(
    id: PeerId,
@@ -54,12 +61,21 @@ type
    cancel: bool = false,
    wantType: WantType = WantType.WantHave,
    full: bool = false,
    sendDontHave: bool = false): Future[void] {.gcsafe.}
  WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
  BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
  PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
  AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
  PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
    sendDontHave: bool = false,
  ) {.async: (raises: [CancelledError]).}
  WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
    async: (raises: [CancelledError])
  .}
  BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
    async: (raises: [CancelledError])
  .}
  PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
    async: (raises: [CancelledError])
  .}
  AccountSender* =
    proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
  PaymentSender* =
    proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}

  BlockExcRequest* = object
    sendWantList*: WantListSender
@@ -76,6 +92,8 @@ type
    request*: BlockExcRequest
    getConn: ConnProvider
    inflightSema: AsyncSemaphore
    maxInflight: int = DefaultMaxInflight
    trackedFutures*: TrackedFutures = TrackedFutures()

proc peerId*(b: BlockExcNetwork): PeerId =
  ## Return peer id
@@ -89,7 +107,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =

  return b.peerId == peer

proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc send*(
    b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
  ## Send message to peer
  ##

@@ -97,8 +117,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
    trace "Unable to send, peer not found", peerId = id
    return

  let peer = b.peers[id]
  try:
    let peer = b.peers[id]

    await b.inflightSema.acquire()
    await peer.send(msg)
  except CancelledError as error:
@@ -109,9 +130,8 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
    b.inflightSema.release()

proc handleWantList(
    b: BlockExcNetwork,
    peer: NetworkPeer,
    list: WantList) {.async.} =
    b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
  ## Handle incoming want list
  ##

@@ -119,14 +139,15 @@ proc handleWantList(
    await b.handlers.onWantList(peer.id, list)

proc sendWantList*(
    b: BlockExcNetwork,
    id: PeerId,
    addresses: seq[BlockAddress],
    priority: int32 = 0,
    cancel: bool = false,
    wantType: WantType = WantType.WantHave,
    full: bool = false,
    sendDontHave: bool = false): Future[void] =
    b: BlockExcNetwork,
    id: PeerId,
    addresses: seq[BlockAddress],
    priority: int32 = 0,
    cancel: bool = false,
    wantType: WantType = WantType.WantHave,
    full: bool = false,
    sendDontHave: bool = false,
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send a want message to peer
  ##

@@ -137,43 +158,41 @@ proc sendWantList*(
        priority: priority,
        cancel: cancel,
        wantType: wantType,
        sendDontHave: sendDontHave) ),
    full: full)
        sendDontHave: sendDontHave,
      )
    ),
    full: full,
  )

  b.send(id, Message(wantlist: msg))

proc sendWantCancellations*(
    b: BlockExcNetwork,
    id: PeerId,
    addresses: seq[BlockAddress]): Future[void] {.async.} =
    b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raises: [CancelledError]).} =
  ## Informs a remote peer that we're no longer interested in a set of blocks
  ##
  await b.sendWantList(id = id, addresses = addresses, cancel = true)

proc handleBlocksDelivery(
    b: BlockExcNetwork,
    peer: NetworkPeer,
    blocksDelivery: seq[BlockDelivery]) {.async.} =
    b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
  ## Handle incoming blocks
  ##

  if not b.handlers.onBlocksDelivery.isNil:
    await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)

proc sendBlocksDelivery*(
    b: BlockExcNetwork,
    id: PeerId,
    blocksDelivery: seq[BlockDelivery]): Future[void] =
    b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send blocks to remote
##
|
||||
|
||||
b.send(id, pb.Message(payload: blocksDelivery))
|
||||
|
||||
proc handleBlockPresence(
|
||||
b: BlockExcNetwork,
|
||||
peer: NetworkPeer,
|
||||
presence: seq[BlockPresence]) {.async.} =
|
||||
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
|
||||
) {.async: (raises: []).} =
|
||||
## Handle block presence
|
||||
##
|
||||
|
||||
@ -181,18 +200,16 @@ proc handleBlockPresence(
|
||||
await b.handlers.onPresence(peer.id, presence)
|
||||
|
||||
proc sendBlockPresence*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerId,
|
||||
presence: seq[BlockPresence]): Future[void] =
|
||||
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
|
||||
) {.async: (raw: true, raises: [CancelledError]).} =
|
||||
## Send presence to remote
|
||||
##
|
||||
|
||||
b.send(id, Message(blockPresences: @presence))
|
||||
|
||||
proc handleAccount(
|
||||
network: BlockExcNetwork,
|
||||
peer: NetworkPeer,
|
||||
account: Account) {.async.} =
|
||||
network: BlockExcNetwork, peer: NetworkPeer, account: Account
|
||||
) {.async: (raises: []).} =
|
||||
## Handle account info
|
||||
##
|
||||
|
||||
@ -200,27 +217,24 @@ proc handleAccount(
|
||||
await network.handlers.onAccount(peer.id, account)
|
||||
|
||||
proc sendAccount*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerId,
|
||||
account: Account): Future[void] =
|
||||
b: BlockExcNetwork, id: PeerId, account: Account
|
||||
) {.async: (raw: true, raises: [CancelledError]).} =
|
||||
## Send account info to remote
|
||||
##
|
||||
|
||||
b.send(id, Message(account: AccountMessage.init(account)))
|
||||
|
||||
proc sendPayment*(
|
||||
b: BlockExcNetwork,
|
||||
id: PeerId,
|
||||
payment: SignedState): Future[void] =
|
||||
b: BlockExcNetwork, id: PeerId, payment: SignedState
|
||||
) {.async: (raw: true, raises: [CancelledError]).} =
|
||||
## Send payment to remote
|
||||
##
|
||||
|
||||
b.send(id, Message(payment: StateChannelUpdate.init(payment)))
|
||||
|
||||
proc handlePayment(
|
||||
network: BlockExcNetwork,
|
||||
peer: NetworkPeer,
|
||||
payment: SignedState) {.async.} =
|
||||
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
|
||||
) {.async: (raises: []).} =
|
||||
## Handle payment
|
||||
##
|
||||
|
||||
@ -228,138 +242,185 @@ proc handlePayment(
|
||||
await network.handlers.onPayment(peer.id, payment)
|
||||
|
||||
proc rpcHandler(
|
||||
b: BlockExcNetwork,
|
||||
peer: NetworkPeer,
|
||||
msg: Message) {.raises: [].} =
|
||||
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
|
||||
) {.async: (raises: []).} =
|
||||
## handle rpc messages
|
||||
##
|
||||
if msg.wantList.entries.len > 0:
|
||||
asyncSpawn b.handleWantList(peer, msg.wantList)
|
||||
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
|
||||
|
||||
if msg.payload.len > 0:
|
||||
asyncSpawn b.handleBlocksDelivery(peer, msg.payload)
|
||||
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
|
||||
|
||||
if msg.blockPresences.len > 0:
|
||||
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
|
||||
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
|
||||
|
||||
if account =? Account.init(msg.account):
|
||||
asyncSpawn b.handleAccount(peer, account)
|
||||
self.trackedFutures.track(self.handleAccount(peer, account))
|
||||
|
||||
if payment =? SignedState.init(msg.payment):
|
||||
asyncSpawn b.handlePayment(peer, payment)
|
||||
self.trackedFutures.track(self.handlePayment(peer, payment))
|
||||
|
||||
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
|
||||
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
|
||||
## Creates or retrieves a BlockExcNetwork Peer
|
||||
##
|
||||
|
||||
if peer in b.peers:
|
||||
return b.peers.getOrDefault(peer, nil)
|
||||
if peer in self.peers:
|
||||
return self.peers.getOrDefault(peer, nil)
|
||||
|
||||
var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
|
||||
var getConn: ConnProvider = proc(): Future[Connection] {.
|
||||
async: (raises: [CancelledError])
|
||||
.} =
|
||||
try:
|
||||
return await b.switch.dial(peer, Codec)
|
||||
trace "Getting new connection stream", peer
|
||||
return await self.switch.dial(peer, Codec)
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as exc:
|
||||
trace "Unable to connect to blockexc peer", exc = exc.msg
|
||||
|
||||
if not isNil(b.getConn):
|
||||
getConn = b.getConn
|
||||
if not isNil(self.getConn):
|
||||
getConn = self.getConn
|
||||
|
||||
let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} =
|
||||
b.rpcHandler(p, msg)
|
||||
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
|
||||
await self.rpcHandler(p, msg)
|
||||
|
||||
# create new pubsub peer
|
||||
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
|
||||
debug "Created new blockexc peer", peer
|
||||
|
||||
b.peers[peer] = blockExcPeer
|
||||
self.peers[peer] = blockExcPeer
|
||||
|
||||
return blockExcPeer
|
||||
|
||||
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
|
||||
## Perform initial setup, such as want
|
||||
## list exchange
|
||||
##
|
||||
|
||||
discard b.getOrCreatePeer(peer)
|
||||
|
||||
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
|
||||
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
|
||||
## Dial a peer
|
||||
##
|
||||
|
||||
if b.isSelf(peer.peerId):
|
||||
if self.isSelf(peer.peerId):
|
||||
trace "Skipping dialing self", peer = peer.peerId
|
||||
return
|
||||
|
||||
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
|
||||
if peer.peerId in self.peers:
|
||||
trace "Already connected to peer", peer = peer.peerId
|
||||
return
|
||||
|
||||
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
|
||||
await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
|
||||
|
||||
proc dropPeer*(
|
||||
self: BlockExcNetwork, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
trace "Dropping peer", peer
|
||||
|
||||
try:
|
||||
if not self.switch.isNil:
|
||||
await self.switch.disconnect(peer)
|
||||
except CatchableError as error:
|
||||
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
|
||||
|
||||
if not self.handlers.onPeerDropped.isNil:
|
||||
await self.handlers.onPeerDropped(peer)
|
||||
|
||||
proc handlePeerJoined*(
|
||||
self: BlockExcNetwork, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
discard self.getOrCreatePeer(peer)
|
||||
if not self.handlers.onPeerJoined.isNil:
|
||||
await self.handlers.onPeerJoined(peer)
|
||||
|
||||
proc handlePeerDeparted*(
|
||||
self: BlockExcNetwork, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
## Cleanup disconnected peer
|
||||
##
|
||||
|
||||
b.peers.del(peer)
|
||||
trace "Cleaning up departed peer", peer
|
||||
self.peers.del(peer)
|
||||
if not self.handlers.onPeerDeparted.isNil:
|
||||
await self.handlers.onPeerDeparted(peer)
|
||||
|
||||
method init*(b: BlockExcNetwork) =
|
||||
method init*(self: BlockExcNetwork) {.raises: [].} =
|
||||
## Perform protocol initialization
|
||||
##
|
||||
|
||||
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
|
||||
proc peerEventHandler(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
b.setupPeer(peerId)
|
||||
await self.handlePeerJoined(peerId)
|
||||
elif event.kind == PeerEventKind.Left:
|
||||
await self.handlePeerDeparted(peerId)
|
||||
else:
|
||||
b.dropPeer(peerId)
|
||||
warn "Unknown peer event", event
|
||||
|
||||
b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
|
||||
b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
|
||||
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
|
||||
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
|
||||
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handler(
|
||||
conn: Connection, proto: string
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
let peerId = conn.peerId
|
||||
let blockexcPeer = b.getOrCreatePeer(peerId)
|
||||
await blockexcPeer.readLoop(conn) # attach read loop
|
||||
let blockexcPeer = self.getOrCreatePeer(peerId)
|
||||
await blockexcPeer.readLoop(conn) # attach read loop
|
||||
|
||||
b.handler = handle
|
||||
b.codec = Codec
|
||||
self.handler = handler
|
||||
self.codec = Codec
|
||||
|
||||
proc stop*(self: BlockExcNetwork) {.async: (raises: []).} =
|
||||
await self.trackedFutures.cancelTracked()
|
||||
|
||||
proc new*(
|
||||
T: type BlockExcNetwork,
|
||||
switch: Switch,
|
||||
connProvider: ConnProvider = nil,
|
||||
maxInflight = MaxInflight): BlockExcNetwork =
|
||||
T: type BlockExcNetwork,
|
||||
switch: Switch,
|
||||
connProvider: ConnProvider = nil,
|
||||
maxInflight = DefaultMaxInflight,
|
||||
): BlockExcNetwork =
|
||||
## Create a new BlockExcNetwork instance
|
||||
##
|
||||
|
||||
let
|
||||
self = BlockExcNetwork(
|
||||
switch: switch,
|
||||
getConn: connProvider,
|
||||
inflightSema: newAsyncSemaphore(maxInflight))
|
||||
let self = BlockExcNetwork(
|
||||
switch: switch,
|
||||
getConn: connProvider,
|
||||
inflightSema: newAsyncSemaphore(maxInflight),
|
||||
maxInflight: maxInflight,
|
||||
)
|
||||
|
||||
self.maxIncomingStreams = self.maxInflight
|
||||
|
||||
proc sendWantList(
|
||||
id: PeerId,
|
||||
cids: seq[BlockAddress],
|
||||
priority: int32 = 0,
|
||||
cancel: bool = false,
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false): Future[void] {.gcsafe.} =
|
||||
self.sendWantList(
|
||||
id, cids, priority, cancel,
|
||||
wantType, full, sendDontHave)
|
||||
id: PeerId,
|
||||
cids: seq[BlockAddress],
|
||||
priority: int32 = 0,
|
||||
cancel: bool = false,
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false,
|
||||
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
|
||||
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
|
||||
|
||||
proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} =
|
||||
proc sendWantCancellations(
|
||||
id: PeerId, addresses: seq[BlockAddress]
|
||||
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
|
||||
self.sendWantCancellations(id, addresses)
|
||||
|
||||
proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
|
||||
proc sendBlocksDelivery(
|
||||
id: PeerId, blocksDelivery: seq[BlockDelivery]
|
||||
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
|
||||
self.sendBlocksDelivery(id, blocksDelivery)
|
||||
|
||||
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
|
||||
proc sendPresence(
|
||||
id: PeerId, presence: seq[BlockPresence]
|
||||
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
|
||||
self.sendBlockPresence(id, presence)
|
||||
|
||||
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
|
||||
proc sendAccount(
|
||||
id: PeerId, account: Account
|
||||
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
|
||||
self.sendAccount(id, account)
|
||||
|
||||
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
|
||||
proc sendPayment(
|
||||
id: PeerId, payment: SignedState
|
||||
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
|
||||
self.sendPayment(id, payment)
|
||||
|
||||
self.request = BlockExcRequest(
|
||||
@ -368,7 +429,8 @@ proc new*(
|
||||
sendBlocksDelivery: sendBlocksDelivery,
|
||||
sendPresence: sendPresence,
|
||||
sendAccount: sendAccount,
|
||||
sendPayment: sendPayment)
|
||||
sendPayment: sendPayment,
|
||||
)
|
||||
|
||||
self.init()
|
||||
return self
|
||||
|
||||
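The diff above swaps untyped `{.gcsafe.}` futures for chronos' typed async, so every callback now declares exactly what it may raise. A minimal sketch of wiring a handler under the new signatures; the `network` variable and the handler body are illustrative assumptions, not part of the diff:

proc onWantList(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
  # the empty raises list is enforced at compile time: this handler must
  # handle every exception itself rather than leak it to the dispatcher
  trace "received want list", peer, entries = wantList.entries.len

# assumed: `network` is an already-constructed BlockExcNetwork
network.handlers.onWantList = onWantList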
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,8 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises
push: {.upraises: [].}
{.push raises: [].}

import pkg/chronos
import pkg/libp2p
@@ -17,78 +16,98 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures

logScope:
  topics = "codex blockexcnetworkpeer"

type
  ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
const DefaultYieldInterval = 50.millis

  RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.}
type
  ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}

  RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}

  NetworkPeer* = ref object of RootObj
    id*: PeerId
    handler*: RPCHandler
    sendConn: Connection
    getConn: ConnProvider
    yieldInterval*: Duration = DefaultYieldInterval
    trackedFutures: TrackedFutures

proc connected*(b: NetworkPeer): bool =
  not(isNil(b.sendConn)) and
    not(b.sendConn.closed or b.sendConn.atEof)
proc connected*(self: NetworkPeer): bool =
  not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)

proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
  if isNil(conn):
    trace "No connection to read from", peer = self.id
    return

  trace "Attaching read loop", peer = self.id, connId = conn.oid
  try:
    var nextYield = Moment.now() + self.yieldInterval
    while not conn.atEof or not conn.closed:
      if Moment.now() > nextYield:
        nextYield = Moment.now() + self.yieldInterval
        trace "Yielding in read loop",
          peer = self.id, nextYield = nextYield, interval = self.yieldInterval
        await sleepAsync(10.millis)

      let
        data = await conn.readLp(MaxMessageSize.int)
        msg = Message.protobufDecode(data).mapFailure().tryGet()
      await b.handler(b, msg)
      trace "Received message", peer = self.id, connId = conn.oid
      await self.handler(self, msg)
  except CancelledError:
    trace "Read loop cancelled"
  except CatchableError as err:
    warn "Exception in blockexc read loop", msg = err.msg
  finally:
    warn "Detaching read loop", peer = self.id, connId = conn.oid
    if self.sendConn == conn:
      self.sendConn = nil
    await conn.close()

proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
  if b.connected:
    return b.sendConn
proc connect*(
    self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
  if self.connected:
    trace "Already connected", peer = self.id, connId = self.sendConn.oid
    return self.sendConn

  b.sendConn = await b.getConn()
  asyncSpawn b.readLoop(b.sendConn)
  return b.sendConn
  self.sendConn = await self.getConn()
  self.trackedFutures.track(self.readLoop(self.sendConn))
  return self.sendConn

proc send*(b: NetworkPeer, msg: Message) {.async.} =
  let conn = await b.connect()
proc send*(
    self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let conn = await self.connect()

  if isNil(conn):
    warn "Unable to get send connection for peer message not sent", peer = b.id
    warn "Unable to get send connection for peer message not sent", peer = self.id
    return

  await conn.writeLp(protobufEncode(msg))

proc broadcast*(b: NetworkPeer, msg: Message) =
  proc sendAwaiter() {.async.} =
    try:
      await b.send(msg)
    except CatchableError as exc:
      warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg

  asyncSpawn sendAwaiter()
  trace "Sending message", peer = self.id, connId = conn.oid
  try:
    await conn.writeLp(protobufEncode(msg))
  except CatchableError as err:
    if self.sendConn == conn:
      self.sendConn = nil
    raise newException(LPStreamError, "Failed to send message: " & err.msg)

func new*(
    T: type NetworkPeer,
    peer: PeerId,
    connProvider: ConnProvider,
    rpcHandler: RPCHandler): NetworkPeer =

  doAssert(not isNil(connProvider),
    "should supply connection provider")
    T: type NetworkPeer,
    peer: PeerId,
    connProvider: ConnProvider,
    rpcHandler: RPCHandler,
): NetworkPeer =
  doAssert(not isNil(connProvider), "should supply connection provider")

  NetworkPeer(
    id: peer,
    getConn: connProvider,
    handler: rpcHandler)
    handler: rpcHandler,
    trackedFutures: TrackedFutures(),
  )
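The reworked read loop yields every `yieldInterval` so one chatty connection cannot monopolize the chronos event loop. A reduced sketch of the same pattern in isolation; `processOne` is a hypothetical stand-in for the read-and-dispatch step:

import pkg/chronos

proc pacedLoop(yieldInterval: Duration) {.async.} =
  var nextYield = Moment.now() + yieldInterval
  while true:
    if Moment.now() > nextYield:
      nextYield = Moment.now() + yieldInterval
      await sleepAsync(10.millis) # let other futures run before continuing
    await processOne()            # assumed: read one message and dispatch it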
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -25,29 +25,77 @@ import ../../logutils

export payments, nitro

type
  BlockExcPeerCtx* = ref object of RootObj
    id*: PeerId
    blocks*: Table[BlockAddress, Presence] # remote peer have list including price
    peerWants*: seq[WantListEntry] # remote peers want lists
    exchanged*: int # times peer has exchanged with us
    lastExchange*: Moment # last time peer has exchanged with us
    account*: ?Account # ethereum account of this peer
    paymentChannel*: ?ChannelId # payment channel id
const
  MinRefreshInterval = 1.seconds
  MaxRefreshBackoff = 36 # 36 seconds
  MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message

proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
  toSeq(self.blocks.keys)
type BlockExcPeerCtx* = ref object of RootObj
  id*: PeerId
  blocks*: Table[BlockAddress, Presence] # remote peer have list including price
  wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
  exchanged*: int # times peer has exchanged with us
  refreshInProgress*: bool # indicates if a refresh is in progress
  lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
  refreshBackoff*: int = 1 # backoff factor for refresh requests
  account*: ?Account # ethereum account of this peer
  paymentChannel*: ?ChannelId # payment channel id
  blocksSent*: HashSet[BlockAddress] # blocks sent to peer
  blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
  lastExchange*: Moment # last time peer has sent us a block
  activityTimeout*: Duration
  lastSentWants*: HashSet[BlockAddress]
    # track what wantList we last sent for delta updates

proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
  self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
  let staleness =
    self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()

proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
  self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
  if staleness and self.refreshInProgress:
    trace "Cleaning up refresh state", peer = self.id
    self.refreshInProgress = false
    self.refreshBackoff = 1

  staleness

proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  address in self.blocksSent

proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
  self.blocksSent.incl(address)

proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
  self.blocksSent.excl(address)

proc refreshRequested*(self: BlockExcPeerCtx) =
  trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
  self.refreshInProgress = true
  self.lastRefresh = Moment.now()

proc refreshReplied*(self: BlockExcPeerCtx) =
  self.refreshInProgress = false
  self.lastRefresh = Moment.now()
  self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)

proc havesUpdated(self: BlockExcPeerCtx) =
  self.refreshBackoff = 1

proc wantsUpdated*(self: BlockExcPeerCtx) =
  self.refreshBackoff = 1

proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
  # XXX: this is ugly and inefficient, but since those will typically
  # be used in "joins", it's better to pay the price here and have
  # a linear join than to not do it and have a quadratic join.
  toHashSet(self.blocks.keys.toSeq)

proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  address in self.blocks

func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
  if presence.address notin self.blocks:
    self.havesUpdated()

  self.blocks[presence.address] = presence

func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@@ -64,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
      price += precense[].price

  price

proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
  ## Adds a block to the set of blocks that have been requested from this peer
  ## (its request schedule).
  if self.blocksRequested.len == 0:
    self.lastExchange = Moment.now()
  self.blocksRequested.incl(address)

proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
  ## Removes a block from the set of blocks that have been requested from this peer
  ## (its request schedule).
  self.blocksRequested.excl(address)

proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  let wasRequested = address in self.blocksRequested
  self.blocksRequested.excl(address)
  self.lastExchange = Moment.now()
  wasRequested

proc activityTimer*(
    self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
  ## This is called by the block exchange when a block is scheduled for this peer.
  ## If the peer sends no blocks for a while, it is considered inactive/uncooperative
  ## and the peer is dropped. Note that ANY block that the peer sends will reset this
  ## timer for all blocks.
  ##
  while true:
    let idleTime = Moment.now() - self.lastExchange
    if idleTime > self.activityTimeout:
      return

    await sleepAsync(self.activityTimeout - idleTime)
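The new refresh fields implement a simple backoff: knowledge of a peer's blocks counts as stale once `refreshBackoff * MinRefreshInterval` has elapsed since `lastRefresh`, and each reply doubles the backoff up to `MaxRefreshBackoff`. A hypothetical caller loop, where `requestHaves` stands in for whatever network call actually asks the peer for its have-list:

if ctx.isKnowledgeStale and not ctx.refreshInProgress:
  ctx.refreshRequested()      # marks the refresh in flight, stamps lastRefresh
  await requestHaves(ctx.id)  # assumed: ask the peer for its have-list
# ...later, when the presence reply arrives:
ctx.refreshReplied()          # doubles refreshBackoff, capped at MaxRefreshBackoff
# any fresh have/want information resets the backoff to 1
# via havesUpdated/wantsUpdated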
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,13 +7,12 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sequtils
import std/tables
import std/algorithm

import pkg/upraises

push: {.upraises: [].}
import std/sequtils

import pkg/chronos
import pkg/libp2p
@@ -22,7 +21,6 @@ import ../protobuf/blockexc
import ../../blocktype
import ../../logutils


import ./peercontext
export peercontext

@@ -33,6 +31,8 @@ type
  PeerCtxStore* = ref object of RootObj
    peers*: OrderedTable[PeerId, BlockExcPeerCtx]

  PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]]

iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
  for p in self.peers.values:
    yield p
@@ -41,7 +41,10 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
  ## Convenience method to check for peer presence
  ##

  a.anyIt( it.id == b )
  a.anyIt(it.id == b)

func peerIds*(self: PeerCtxStore): seq[PeerId] =
  toSeq(self.peers.keys)

func contains*(self: PeerCtxStore, peerId: PeerId): bool =
  peerId in self.peers
@@ -59,43 +62,27 @@ func len*(self: PeerCtxStore): int =
  self.peers.len

func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )
  toSeq(self.peers.values).filterIt(address in it.peerHave)

func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) )
  # FIXME: this is way slower and can end up leading to unexpected performance loss.
  toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))

func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )
  toSeq(self.peers.values).filterIt(address in it.wantedBlocks)

func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )
  # FIXME: this is way slower and can end up leading to unexpected performance loss.
  toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))

func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  # assume that the price for all leaves in a tree is the same
  let rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid)
  var peers = self.peersHave(rootAddress)

  func cmp(a, b: BlockExcPeerCtx): int =
    var
      priceA = 0.u256
      priceB = 0.u256

    a.blocks.withValue(rootAddress, precense):
      priceA = precense[].price

    b.blocks.withValue(rootAddress, precense):
      priceB = precense[].price

    if priceA == priceB:
      0
    elif priceA > priceB:
      1
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
  var res: PeersForBlock = (@[], @[])
  for peer in self:
    if address in peer:
      res.with.add(peer)
    else:
      -1

  peers.sort(cmp)
  trace "Selected cheapest peers", peers = peers.len
  return peers
      res.without.add(peer)
  res

proc new*(T: type PeerCtxStore): PeerCtxStore =
  ## create new instance of a peer context store
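`getPeersForBlock` replaces the removed price-sorting `selectCheapest` with a single pass that partitions peers around one address. A usage sketch; `store` and `address` are assumed to exist:

let peers = store.getPeersForBlock(address)
for ctx in peers.with:     # peers whose have-list includes the block
  discard                  # candidates to request the block from
for ctx in peers.without:  # everyone else
  discard                  # candidates for a WantHave probe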
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,7 +9,6 @@

import std/hashes
import std/sequtils
import pkg/stew/endians2

import message

@@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate

proc hash*(a: BlockAddress): Hash =
  if a.leaf:
    let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
    hash(data)
  else:
    hash(a.cid.data.buffer)

proc hash*(e: WantListEntry): Hash =
  hash(e.address)

@@ -42,7 +34,6 @@ proc `==`*(a: WantListEntry, b: BlockAddress): bool =
proc `<`*(a, b: WantListEntry): bool =
  a.priority < b.priority


proc `==`*(a: BlockPresence, b: BlockAddress): bool =
  return a.address == b
@@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes
# Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
@@ -20,40 +20,44 @@ const

type
  WantType* = enum
    WantBlock = 0,
    WantBlock = 0
    WantHave = 1

  WantListEntry* = object
    address*: BlockAddress
    priority*: int32 # The priority (normalized). default to 1
    cancel*: bool # Whether this revokes an entry
    wantType*: WantType # Note: defaults to enum 0, ie Block
    sendDontHave*: bool # Note: defaults to false
    inFlight*: bool # Whether block sending is in progress. Not serialized.
    # XXX: I think explicit priority is pointless as the peer will request
    # the blocks in the order it wants to receive them, and all we have to
    # do is process those in the same order as we send them back. It also
    # complicates things for no reason at the moment, as the priority is
    # always set to 0.
    priority*: int32 # The priority (normalized). default to 1
    cancel*: bool # Whether this revokes an entry
    wantType*: WantType # Note: defaults to enum 0, ie Block
    sendDontHave*: bool # Note: defaults to false

  WantList* = object
    entries*: seq[WantListEntry] # A list of wantList entries
    full*: bool # Whether this is the full wantList. default to false

  BlockDelivery* = object
    blk*: Block
    address*: BlockAddress
    proof*: ?CodexProof # Present only if `address.leaf` is true

  BlockPresenceType* = enum
    Have = 0,
    Have = 0
    DontHave = 1

  BlockPresence* = object
    address*: BlockAddress
    `type`*: BlockPresenceType
    price*: seq[byte] # Amount of assets to pay for the block (UInt256)

  AccountMessage* = object
    address*: seq[byte] # Ethereum address to which payments should be made

  StateChannelUpdate* = object
    update*: seq[byte] # Signed Nitro state, serialized as JSON

  Message* = object
    wantList*: WantList
@@ -97,7 +101,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
  pb.write(field, ipb)

proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
  var ipb = initProtoBuffer(maxSize = MaxBlockSize)
  var ipb = initProtoBuffer()
  ipb.write(1, value.blk.cid.data.buffer)
  ipb.write(2, value.blk.data)
  ipb.write(3, value.address)
@@ -128,7 +132,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
  pb.write(field, ipb)

proc protobufEncode*(value: Message): seq[byte] =
  var ipb = initProtoBuffer(maxSize = MaxMessageSize)
  var ipb = initProtoBuffer()
  ipb.write(1, value.wantList)
  for v in value.payload:
    ipb.write(3, v)
@@ -140,7 +144,6 @@ proc protobufEncode*(value: Message): seq[byte] =
  ipb.finish()
  ipb.buffer


#
# Decoding Message from seq[byte] in Protobuf format
#
@@ -151,22 +154,22 @@ proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
    field: uint64
    cidBuf = newSeq[byte]()

  if ? pb.getField(1, field):
  if ?pb.getField(1, field):
    leaf = bool(field)

  if leaf:
    var
      treeCid: Cid
      index: Natural
    if ? pb.getField(2, cidBuf):
      treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
    if ? pb.getField(3, field):
    if ?pb.getField(2, cidBuf):
      treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
    if ?pb.getField(3, field):
      index = field
    value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
  else:
    var cid: Cid
    if ? pb.getField(4, cidBuf):
      cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
    if ?pb.getField(4, cidBuf):
      cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
    value = BlockAddress(leaf: false, cid: cid)

  ok(value)
@@ -176,15 +179,15 @@ proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry]
    value = WantListEntry()
    field: uint64
    ipb: ProtoBuffer
  if ? pb.getField(1, ipb):
    value.address = ? BlockAddress.decode(ipb)
  if ? pb.getField(2, field):
  if ?pb.getField(1, ipb):
    value.address = ?BlockAddress.decode(ipb)
  if ?pb.getField(2, field):
    value.priority = int32(field)
  if ? pb.getField(3, field):
  if ?pb.getField(3, field):
    value.cancel = bool(field)
  if ? pb.getField(4, field):
  if ?pb.getField(4, field):
    value.wantType = WantType(field)
  if ? pb.getField(5, field):
  if ?pb.getField(5, field):
    value.sendDontHave = bool(field)
  ok(value)

@@ -193,10 +196,10 @@ proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
    value = WantList()
    field: uint64
    sublist: seq[seq[byte]]
  if ? pb.getRepeatedField(1, sublist):
  if ?pb.getRepeatedField(1, sublist):
    for item in sublist:
      value.entries.add(? WantListEntry.decode(initProtoBuffer(item)))
  if ? pb.getField(2, field):
      value.entries.add(?WantListEntry.decode(initProtoBuffer(item)))
  if ?pb.getField(2, field):
    value.full = bool(field)
  ok(value)

@@ -208,17 +211,18 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery]
    cid: Cid
    ipb: ProtoBuffer

  if ? pb.getField(1, cidBuf):
    cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
  if ? pb.getField(2, dataBuf):
    value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
  if ? pb.getField(3, ipb):
    value.address = ? BlockAddress.decode(ipb)
  if ?pb.getField(1, cidBuf):
    cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
  if ?pb.getField(2, dataBuf):
    value.blk =
      ?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
  if ?pb.getField(3, ipb):
    value.address = ?BlockAddress.decode(ipb)

  if value.address.leaf:
    var proofBuf = newSeq[byte]()
    if ? pb.getField(4, proofBuf):
      let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
    if ?pb.getField(4, proofBuf):
      let proof = ?CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
      value.proof = proof.some
    else:
      value.proof = CodexProof.none
@@ -232,42 +236,42 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
    value = BlockPresence()
    field: uint64
    ipb: ProtoBuffer
  if ? pb.getField(1, ipb):
    value.address = ? BlockAddress.decode(ipb)
  if ? pb.getField(2, field):
  if ?pb.getField(1, ipb):
    value.address = ?BlockAddress.decode(ipb)
  if ?pb.getField(2, field):
    value.`type` = BlockPresenceType(field)
  discard ? pb.getField(3, value.price)
  discard ?pb.getField(3, value.price)
  ok(value)

proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
  var
    value = AccountMessage()
  discard ? pb.getField(1, value.address)
  var value = AccountMessage()
  discard ?pb.getField(1, value.address)
  ok(value)

proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChannelUpdate] =
  var
    value = StateChannelUpdate()
  discard ? pb.getField(1, value.update)
proc decode*(
    _: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
  var value = StateChannelUpdate()
  discard ?pb.getField(1, value.update)
  ok(value)

proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
  var
    value = Message()
    pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
    pb = initProtoBuffer(msg)
    ipb: ProtoBuffer
    sublist: seq[seq[byte]]
  if ? pb.getField(1, ipb):
    value.wantList = ? WantList.decode(ipb)
  if ? pb.getRepeatedField(3, sublist):
  if ?pb.getField(1, ipb):
    value.wantList = ?WantList.decode(ipb)
  if ?pb.getRepeatedField(3, sublist):
    for item in sublist:
      value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
  if ? pb.getRepeatedField(4, sublist):
      value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
  if ?pb.getRepeatedField(4, sublist):
    for item in sublist:
      value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item)))
  discard ? pb.getField(5, value.pendingBytes)
  if ? pb.getField(6, ipb):
    value.account = ? AccountMessage.decode(ipb)
  if ? pb.getField(7, ipb):
    value.payment = ? StateChannelUpdate.decode(ipb)
      value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
  discard ?pb.getField(5, value.pendingBytes)
  if ?pb.getField(6, ipb):
    value.account = ?AccountMessage.decode(ipb)
  if ?pb.getField(7, ipb):
    value.payment = ?StateChannelUpdate.decode(ipb)
  ok(value)
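The reflow binds the `?` operator tightly to each `getField` call, but the wire behavior is unchanged: encode and decode remain inverses. A roundtrip sketch; `entry` is an assumed `WantListEntry`, and `tryGet` is the usual Result accessor that raises on failure:

let msg = Message(wantList: WantList(entries: @[entry], full: false))
let bytes = protobufEncode(msg)
# protobufDecode returns a ProtoResult, so malformed input surfaces
# as an error value rather than an exception
let decoded = Message.protobufDecode(bytes).tryGet()
assert decoded.wantList.entries.len == 1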
@@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes.
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md

syntax = "proto3";
@@ -1,8 +1,9 @@
{.push raises: [].}

import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc

export AccountMessage
@@ -11,11 +12,8 @@ export StateChannelUpdate
export stint
export nitro

push: {.upraises: [].}

type
  Account* = object
    address*: EthAddress
type Account* = object
  address*: EthAddress

func init*(_: type AccountMessage, account: Account): AccountMessage =
  AccountMessage(address: @(account.address.toArray))
@@ -24,7 +22,7 @@ func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress =
  var address: array[20, byte]
  if bytes.len != address.len:
    return EthAddress.none
  for i in 0..<address.len:
  for i in 0 ..< address.len:
    address[i] = bytes[i]
  EthAddress(address).some
@@ -1,8 +1,9 @@
{.push raises: [].}

import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc

import ../../blocktype
@@ -11,8 +12,6 @@ export questionable
export stint
export BlockPresenceType

upraises.push: {.upraises: [].}

type
  PresenceMessage* = blockexc.BlockPresence
  Presence* = object
@@ -32,15 +31,12 @@ func init*(_: type Presence, message: PresenceMessage): ?Presence =
  some Presence(
    address: message.address,
    have: message.`type` == BlockPresenceType.Have,
    price: price
    price: price,
  )

func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
  PresenceMessage(
    address: presence.address,
    `type`: if presence.have:
      BlockPresenceType.Have
    else:
      BlockPresenceType.DontHave,
    price: @(presence.price.toBytesBE)
    `type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
    price: @(presence.price.toBytesBE),
  )
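`Presence.init` and `PresenceMessage.init` remain inverses after the reflow, with the `?Presence` option signalling decode failure. A small sketch; `blockAddress` is an assumed `BlockAddress`:

let msg = PresenceMessage.init(
  Presence(address: blockAddress, have: true, price: 0.u256)
)
# `=?` unpacks the ?Presence returned by the decoder
if presence =? Presence.init(msg):
  assert presence.have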
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,15 +9,14 @@

import std/tables
import std/sugar
import std/hashes

export tables

import pkg/upraises

push: {.upraises: [].}
{.push raises: [], gcsafe.}

import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/stew/[byteutils, endians2]
import pkg/questionable
import pkg/questionable/results

@@ -49,16 +48,16 @@ logutils.formatIt(LogFormat.textLines, BlockAddress):
  else:
    "cid: " & shortLog($it.cid)

logutils.formatIt(LogFormat.json, BlockAddress): %it
logutils.formatIt(LogFormat.json, BlockAddress):
  %it

proc `==`*(a, b: BlockAddress): bool =
  a.leaf == b.leaf and
    (
      if a.leaf:
        a.treeCid == b.treeCid and a.index == b.index
      else:
        a.cid == b.cid
    )
  a.leaf == b.leaf and (
    if a.leaf:
      a.treeCid == b.treeCid and a.index == b.index
    else:
      a.cid == b.cid
  )

proc `$`*(a: BlockAddress): string =
@@ -66,11 +65,15 @@ proc `$`*(a: BlockAddress): string =
  else:
    "cid: " & $a.cid

proc cidOrTreeCid*(a: BlockAddress): Cid =
proc hash*(a: BlockAddress): Hash =
  if a.leaf:
    a.treeCid
    let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
    hash(data)
  else:
    a.cid
    hash(a.cid.data.buffer)

proc cidOrTreeCid*(a: BlockAddress): Cid =
  if a.leaf: a.treeCid else: a.cid

proc address*(b: Block): BlockAddress =
  BlockAddress(leaf: false, cid: b.cid)
@@ -86,57 +89,55 @@ proc `$`*(b: Block): string =
  result &= "\ndata: " & string.fromBytes(b.data)

func new*(
    T: type Block,
    data: openArray[byte] = [],
    version = CIDv1,
    mcodec = Sha256HashCodec,
    codec = BlockCodec): ?!Block =
    T: type Block,
    data: openArray[byte] = [],
    version = CIDv1,
    mcodec = Sha256HashCodec,
    codec = BlockCodec,
): ?!Block =
  ## creates a new block for both storage and network IO
  ##

  let
    hash = ? MultiHash.digest($mcodec, data).mapFailure
    cid = ? Cid.init(version, codec, hash).mapFailure
    hash = ?MultiHash.digest($mcodec, data).mapFailure
    cid = ?Cid.init(version, codec, hash).mapFailure

  # TODO: If the hash is `>=` to the data,
  # use the Cid as a container!
  Block(
    cid: cid,
    data: @data).success

  Block(cid: cid, data: @data).success

proc new*(
    T: type Block,
    cid: Cid,
    data: openArray[byte],
    verify: bool = true
    T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
  ## creates a new block for both storage and network IO
  ##

  if verify:
    let
      mhash = ? cid.mhash.mapFailure
      computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure
      computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
      mhash = ?cid.mhash.mapFailure
      computedMhash = ?MultiHash.digest($mhash.mcodec, data).mapFailure
      computedCid = ?Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
    if computedCid != cid:
      return "Cid doesn't match the data".failure

  return Block(
    cid: cid,
    data: @data
  ).success
  return Block(cid: cid, data: @data).success

proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
  emptyCid(version, hcodec, BlockCodec)
    .flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
  emptyCid(version, hcodec, BlockCodec).flatMap(
    (cid: Cid) => Block.new(cid = cid, data = @[])
  )

proc emptyBlock*(cid: Cid): ?!Block =
  cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
    emptyBlock(cid.cidver, mhash.mcodec))
  cid.mhash.mapFailure.flatMap(
    (mhash: MultiHash) => emptyBlock(cid.cidver, mhash.mcodec)
  )

proc isEmpty*(cid: Cid): bool =
  success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
    emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
  success(cid) ==
    cid.mhash.mapFailure.flatMap(
      (mhash: MultiHash) => emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)
    )

proc isEmpty*(blk: Block): bool =
  blk.cid.isEmpty
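With `hash` now defined next to `BlockAddress` itself, addresses work directly as set and table keys; a leaf address hashes its tree CID together with its index, so leaves of the same tree stay distinct. A brief sketch, where `treeCid` is an assumed `Cid`:

import std/sets

var wanted: HashSet[BlockAddress]
wanted.incl BlockAddress(leaf: true, treeCid: treeCid, index: 0)
wanted.incl BlockAddress(leaf: true, treeCid: treeCid, index: 1)
assert wanted.len == 2 # distinct indices hash to distinct members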
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,9 +9,7 @@

# TODO: This is super inefficient and needs a rewrite, but it'll do for now

import pkg/upraises

push: {.upraises: [].}
{.push raises: [], gcsafe.}

import pkg/questionable
import pkg/questionable/results
@@ -23,20 +21,22 @@ import ./logutils

export blocktype

const
  DefaultChunkSize* = DefaultBlockSize
const DefaultChunkSize* = DefaultBlockSize

type
  # default reader type
  ChunkerError* = object of CatchableError
  ChunkBuffer* = ptr UncheckedArray[byte]
  Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
  Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
    async: (raises: [ChunkerError, CancelledError])
  .}

  # Reader that splits input data into fixed-size chunks
  Chunker* = ref object
    reader*: Reader # Procedure called to actually read the data
    offset*: int # Bytes read so far (position in the stream)
    chunkSize*: NBytes # Size of each chunk
    pad*: bool # Pad last chunk to chunkSize?

  FileChunker* = Chunker
  LPStreamChunker* = Chunker
@@ -60,30 +60,21 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
  return move buff

proc new*(
    T: type Chunker,
    reader: Reader,
    chunkSize = DefaultChunkSize,
    pad = true
    T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true
): Chunker =
  ## create a new Chunker instance
  ##
  Chunker(
    reader: reader,
    offset: 0,
    chunkSize: chunkSize,
    pad: pad)
  Chunker(reader: reader, offset: 0, chunkSize: chunkSize, pad: pad)

proc new*(
    T: type LPStreamChunker,
    stream: LPStream,
    chunkSize = DefaultChunkSize,
    pad = true
    T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true
): LPStreamChunker =
  ## create the default File chunker
  ##

  proc reader(data: ChunkBuffer, len: int): Future[int]
      {.gcsafe, async, raises: [Defect].} =
  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    var res = 0
    try:
      while res < len:
@@ -94,29 +85,24 @@ proc new*(
      raise error
    except LPStreamError as error:
      error "LPStream error", err = error.msg
      raise error
      raise newException(ChunkerError, "LPStream error", error)
    except CatchableError as exc:
      error "CatchableError exception", exc = exc.msg
      raise newException(Defect, exc.msg)

    return res

  LPStreamChunker.new(
    reader = reader,
    chunkSize = chunkSize,
    pad = pad)
  LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)

proc new*(
    T: type FileChunker,
    file: File,
    chunkSize = DefaultChunkSize,
    pad = true
    T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true
): FileChunker =
  ## create the default File chunker
  ##

  proc reader(data: ChunkBuffer, len: int): Future[int]
      {.gcsafe, async, raises: [Defect].} =
  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    var total = 0
    try:
      while total < len:
@@ -135,7 +121,4 @@ proc new*(

    return total

  FileChunker.new(
    reader = reader,
    chunkSize = chunkSize,
    pad = pad)
  FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
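Under the new typed-raises `Reader`, stream failures surface as `ChunkerError` instead of being rethrown raw. A usage sketch; `stream` is an assumed `LPStream`, and the loop follows the usual `getBytes`-until-empty convention:

let chunker = LPStreamChunker.new(stream = stream, chunkSize = DefaultChunkSize)
while true:
  let chunk = await chunker.getBytes()
  if chunk.len <= 0:
    break # an empty chunk signals end of stream
  # hand `chunk` to storage or hashing here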
@@ -1,6 +1,7 @@
{.push raises: [].}

import pkg/chronos
import pkg/stew/endians2
import pkg/upraises
import pkg/stint

type
@@ -8,10 +9,12 @@ type
  SecondsSince1970* = int64
  Timeout* = object of CatchableError

method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} =
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
  raiseAssert "not implemented"

method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
method waitUntil*(
    clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
  raiseAssert "not implemented"

method start*(clock: Clock) {.base, async.} =
@@ -20,9 +23,9 @@ method start*(clock: Clock) {.base, async.} =
method stop*(clock: Clock) {.base, async.} =
  discard

proc withTimeout*(future: Future[void],
                  clock: Clock,
                  expiry: SecondsSince1970) {.async.} =
proc withTimeout*(
    future: Future[void], clock: Clock, expiry: SecondsSince1970
) {.async.} =
  let timeout = clock.waitUntil(expiry)
  try:
    await future or timeout
@@ -40,5 +43,8 @@ proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 =
  let asUint = uint64.fromBytes(bytes)
  cast[int64](asUint)

proc toSecondsSince1970*(num: uint64): SecondsSince1970 =
  cast[int64](num)

proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 =
  bigint.truncate(int64)
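`waitUntil` being typed to raise only `CancelledError` is what lets `withTimeout` race a future against the clock. A usage sketch; `someWork()` is an assumed `Future[void]`, and the failure path (presumably raising `Timeout`, per the type declared above) lies outside the visible excerpt:

let fut = someWork()
# completes when `fut` finishes, or when the clock reaches the expiry
await fut.withTimeout(clock, expiry = clock.now() + 30)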
codex/codex.nim (263 changed lines)
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -12,23 +12,23 @@ import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net

import pkg/chronos
import pkg/taskpools
import pkg/presto
import pkg/libp2p
import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
import pkg/taskpools

import ./node
import ./conf
import ./rng
import ./rng as random
import ./rest/api
import ./stores
import ./slots
@@ -44,6 +44,7 @@ import ./utils/addrutils
import ./namespaces
import ./codextypes
import ./logutils
import ./nat

logScope:
  topics = "codex node"
@@ -56,10 +57,20 @@ type
    repoStore: RepoStore
    maintenance: BlockMaintainer
    taskpool: Taskpool
    isStarted: bool

  CodexPrivateKey* = libp2p.PrivateKey # alias
  EthWallet = ethers.Wallet

func config*(self: CodexServer): CodexConf =
  return self.config

func node*(self: CodexServer): CodexNodeRef =
  return self.codexNode

func repoStore*(self: CodexServer): RepoStore =
  return self.repoStore

proc waitForSync(provider: Provider): Future[void] {.async.} =
  var sleepTime = 1
  trace "Checking sync state of Ethereum provider..."
@@ -70,8 +81,7 @@ proc waitForSync(provider: Provider): Future[void] {.async.} =
    inc sleepTime
  trace "Ethereum provider is synced."

proc bootstrapInteractions(
    s: CodexServer): Future[void] {.async.} =
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
  ## bootstrap interactions and return contracts
  ## using clients, hosts, validators pairings
  ##
@@ -84,7 +94,9 @@ proc bootstrapInteractions(
    error "Persistence enabled, but no Ethereum account was set"
    quit QuitFailure

  let provider = JsonRpcProvider.new(config.ethProvider)
  let provider = JsonRpcProvider.new(
    config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
  )
  await waitForSync(provider)
  var signer: Signer
  if account =? config.ethAccount:
@@ -104,13 +116,15 @@ proc bootstrapInteractions(
      quit QuitFailure
    signer = wallet

  let deploy = Deployment.new(provider, config)
  let deploy = Deployment.new(provider, config.marketplaceAddress)
  without marketplaceAddress =? await deploy.address(Marketplace):
    error "No Marketplace address was specified or there is no known address for the current network"
    quit QuitFailure

  let marketplace = Marketplace.new(marketplaceAddress, signer)
  let market = OnChainMarket.new(marketplace, config.rewardRecipient)
  let market = OnChainMarket.new(
    marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
  )
  let clock = OnChainClock.new(provider)

  var client: ?ClientInteractions
@@ -124,7 +138,7 @@ proc bootstrapInteractions(

  # This is used for simulation purposes. Normal nodes won't be compiled with this flag
  # and hence the proof failure will always be 0.
  when codex_enable_proof_failures:
  when storage_enable_proof_failures:
    let proofFailures = config.simulateProofFailures
    if proofFailures > 0:
      warn "Enabling proof failure simulation!"
@@ -133,172 +147,232 @@ proc bootstrapInteractions(
    if config.simulateProofFailures > 0:
      warn "Proof failure simulation is not enabled for this build! Configuration ignored"

  if error =? (await market.loadConfig()).errorOption:
    fatal "Cannot load market configuration", error = error.msg
    quit QuitFailure

  let purchasing = Purchasing.new(market, clock)
  let sales = Sales.new(market, clock, repo, proofFailures)
  client = some ClientInteractions.new(clock, purchasing)
  host = some HostInteractions.new(clock, sales)

  if config.validator:
    without validationConfig =? ValidationConfig.init(
      config.validatorMaxSlots,
      config.validatorGroups,
      config.validatorGroupIndex), err:
      error "Invalid validation parameters", err = err.msg
      quit QuitFailure
    without validationConfig =?
      ValidationConfig.init(
        config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
      ), err:
      error "Invalid validation parameters", err = err.msg
|
||||
quit QuitFailure
|
||||
let validation = Validation.new(clock, market, validationConfig)
|
||||
validator = some ValidatorInteractions.new(clock, validation)
|
||||
|
||||
s.codexNode.contracts = (client, host, validator)
|
||||
|
||||
proc start*(s: CodexServer) {.async.} =
|
||||
trace "Starting codex node", config = $s.config
|
||||
if s.isStarted:
|
||||
warn "Storage server already started, skipping"
|
||||
return
|
||||
|
||||
trace "Starting Storage node", config = $s.config
|
||||
await s.repoStore.start()
|
||||
|
||||
s.maintenance.start()
|
||||
|
||||
await s.codexNode.switch.start()
|
||||
|
||||
let
|
||||
# TODO: Can't define these as constants, pity
|
||||
natIpPart = MultiAddress.init("/ip4/" & $s.config.nat & "/")
|
||||
.expect("Should create multiaddress")
|
||||
anyAddrIp = MultiAddress.init("/ip4/0.0.0.0/")
|
||||
.expect("Should create multiaddress")
|
||||
loopBackAddrIp = MultiAddress.init("/ip4/127.0.0.1/")
|
||||
.expect("Should create multiaddress")
|
||||
|
||||
# announce addresses should be set to bound addresses,
|
||||
# but the IP should be mapped to the provided nat ip
|
||||
announceAddrs = s.codexNode.switch.peerInfo.addrs.mapIt:
|
||||
block:
|
||||
let
|
||||
listenIPPart = it[multiCodec("ip4")].expect("Should get IP")
|
||||
|
||||
if listenIPPart == anyAddrIp or
|
||||
(listenIPPart == loopBackAddrIp and natIpPart != loopBackAddrIp):
|
||||
it.remapAddr(s.config.nat.some)
|
||||
else:
|
||||
it
|
||||
let (announceAddrs, discoveryAddrs) = nattedAddress(
|
||||
s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort
|
||||
)
|
||||
|
||||
s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
|
||||
s.codexNode.discovery.updateDhtRecord(s.config.nat, s.config.discoveryPort)
|
||||
s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
|
||||
|
||||
await s.bootstrapInteractions()
|
||||
await s.codexNode.start()
|
||||
s.restServer.start()
|
||||
|
||||
if s.restServer != nil:
|
||||
s.restServer.start()
|
||||
|
||||
s.isStarted = true
|
||||
|
||||
proc stop*(s: CodexServer) {.async.} =
|
||||
notice "Stopping codex node"
|
||||
if not s.isStarted:
|
||||
warn "Storage is not started"
|
||||
return
|
||||
|
||||
notice "Stopping Storage node"
|
||||
|
||||
s.taskpool.syncAll()
|
||||
s.taskpool.shutdown()
|
||||
var futures =
|
||||
@[
|
||||
s.codexNode.switch.stop(),
|
||||
s.codexNode.stop(),
|
||||
s.repoStore.stop(),
|
||||
s.maintenance.stop(),
|
||||
]
|
||||
|
||||
await allFuturesThrowing(
|
||||
s.restServer.stop(),
|
||||
s.codexNode.switch.stop(),
|
||||
s.codexNode.stop(),
|
||||
s.repoStore.stop(),
|
||||
s.maintenance.stop())
|
||||
if s.restServer != nil:
|
||||
futures.add(s.restServer.stop())
|
||||
|
||||
let res = await noCancel allFinishedFailed[void](futures)
|
||||
|
||||
if res.failure.len > 0:
|
||||
error "Failed to stop Storage node", failures = res.failure.len
|
||||
raiseAssert "Failed to stop Storage node"
|
||||
|
||||
proc close*(s: CodexServer) {.async.} =
|
||||
var futures = @[s.codexNode.close(), s.repoStore.close()]
|
||||
|
||||
let res = await noCancel allFinishedFailed[void](futures)
|
||||
|
||||
if not s.taskpool.isNil:
|
||||
try:
|
||||
s.taskpool.shutdown()
|
||||
except Exception as exc:
|
||||
error "Failed to stop the taskpool", failures = res.failure.len
|
||||
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
|
||||
|
||||
if res.failure.len > 0:
|
||||
error "Failed to close Storage node", failures = res.failure.len
|
||||
raiseAssert "Failed to close Storage node"
|
||||
|
||||
proc shutdown*(server: CodexServer) {.async.} =
|
||||
await server.stop()
|
||||
await server.close()
|
||||
|
||||
proc new*(
|
||||
T: type CodexServer,
|
||||
config: CodexConf,
|
||||
privateKey: CodexPrivateKey): CodexServer =
|
||||
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
|
||||
): CodexServer =
|
||||
## create CodexServer including setting up datastore, repostore, etc
|
||||
let
|
||||
switch = SwitchBuilder
|
||||
let switch = SwitchBuilder
|
||||
.new()
|
||||
.withPrivateKey(privateKey)
|
||||
.withAddresses(config.listenAddrs)
|
||||
.withRng(Rng.instance())
|
||||
.withRng(random.Rng.instance())
|
||||
.withNoise()
|
||||
.withMplex(5.minutes, 5.minutes)
|
||||
.withMaxConnections(config.maxPeers)
|
||||
.withAgentVersion(config.agentString)
|
||||
.withSignedPeerRecord(true)
|
||||
.withTcpTransport({ServerFlags.ReuseAddr})
|
||||
.withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
|
||||
.build()
|
||||
|
||||
var
|
||||
cache: CacheStore = nil
|
||||
taskpool: Taskpool
|
||||
|
||||
try:
|
||||
if config.numThreads == ThreadCount(0):
|
||||
taskpool = Taskpool.new(numThreads = min(countProcessors(), 16))
|
||||
else:
|
||||
taskpool = Taskpool.new(numThreads = int(config.numThreads))
|
||||
info "Threadpool started", numThreads = taskpool.numThreads
|
||||
except CatchableError as exc:
|
||||
raiseAssert("Failure in taskpool initialization:" & exc.msg)
|
||||
|
||||
if config.cacheSize > 0'nb:
|
||||
cache = CacheStore.new(cacheSize = config.cacheSize)
|
||||
## Is unused?
|
||||
|
||||
let
|
||||
discoveryDir = config.dataDir / CodexDhtNamespace
|
||||
let discoveryDir = config.dataDir / CodexDhtNamespace
|
||||
|
||||
if io2.createPath(discoveryDir).isErr:
|
||||
trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
|
||||
trace "Unable to create discovery directory for block store",
|
||||
discoveryDir = discoveryDir
|
||||
raise (ref Defect)(
|
||||
msg: "Unable to create discovery directory for block store: " & discoveryDir)
|
||||
msg: "Unable to create discovery directory for block store: " & discoveryDir
|
||||
)
|
||||
|
||||
let
|
||||
discoveryStore = Datastore(
|
||||
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
|
||||
.expect("Should create discovery datastore!"))
|
||||
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect(
|
||||
"Should create discovery datastore!"
|
||||
)
|
||||
)
|
||||
|
||||
discovery = Discovery.new(
|
||||
switch.peerInfo.privateKey,
|
||||
announceAddrs = config.listenAddrs,
|
||||
bindIp = config.discoveryIp,
|
||||
bindPort = config.discoveryPort,
|
||||
bootstrapNodes = config.bootstrapNodes,
|
||||
store = discoveryStore)
|
||||
store = discoveryStore,
|
||||
)
|
||||
|
||||
wallet = WalletRef.new(EthPrivateKey.random())
|
||||
network = BlockExcNetwork.new(switch)
|
||||
|
||||
repoData = case config.repoKind
|
||||
of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
|
||||
.expect("Should create repo file data store!"))
|
||||
of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir)
|
||||
.expect("Should create repo SQLite data store!"))
|
||||
of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir)
|
||||
.expect("Should create repo LevelDB data store!"))
|
||||
repoData =
|
||||
case config.repoKind
|
||||
of repoFS:
|
||||
Datastore(
|
||||
FSDatastore.new($config.dataDir, depth = 5).expect(
|
||||
"Should create repo file data store!"
|
||||
)
|
||||
)
|
||||
of repoSQLite:
|
||||
Datastore(
|
||||
SQLiteDatastore.new($config.dataDir).expect(
|
||||
"Should create repo SQLite data store!"
|
||||
)
|
||||
)
|
||||
of repoLevelDb:
|
||||
Datastore(
|
||||
LevelDbDatastore.new($config.dataDir).expect(
|
||||
"Should create repo LevelDB data store!"
|
||||
)
|
||||
)
|
||||
|
||||
repoStore = RepoStore.new(
|
||||
repoDs = repoData,
|
||||
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace)
|
||||
.expect("Should create metadata store!"),
|
||||
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect(
|
||||
"Should create metadata store!"
|
||||
),
|
||||
quotaMaxBytes = config.storageQuota,
|
||||
blockTtl = config.blockTtl)
|
||||
blockTtl = config.blockTtl,
|
||||
)
|
||||
|
||||
maintenance = BlockMaintainer.new(
|
||||
repoStore,
|
||||
interval = config.blockMaintenanceInterval,
|
||||
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks)
|
||||
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
|
||||
)
|
||||
|
||||
peerStore = PeerCtxStore.new()
|
||||
pendingBlocks = PendingBlocksManager.new()
|
||||
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
|
||||
advertiser = Advertiser.new(repoStore, discovery)
|
||||
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
|
||||
engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks)
|
||||
blockDiscovery =
|
||||
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
|
||||
engine = BlockExcEngine.new(
|
||||
repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
|
||||
)
|
||||
store = NetworkStore.new(engine, repoStore)
|
||||
prover = if config.prover:
|
||||
let backend = config.initializeBackend().expect("Unable to create prover backend.")
|
||||
some Prover.new(store, backend, config.numProofSamples)
|
||||
else:
|
||||
none Prover
|
||||
|
||||
taskpool = Taskpool.new(num_threads = countProcessors())
|
||||
prover =
|
||||
if config.prover:
|
||||
let backend =
|
||||
config.initializeBackend().expect("Unable to create prover backend.")
|
||||
some Prover.new(store, backend, config.numProofSamples)
|
||||
else:
|
||||
none Prover
|
||||
|
||||
codexNode = CodexNodeRef.new(
|
||||
switch = switch,
|
||||
networkStore = store,
|
||||
engine = engine,
|
||||
prover = prover,
|
||||
discovery = discovery,
|
||||
taskpool = taskpool)
|
||||
prover = prover,
|
||||
taskPool = taskpool,
|
||||
)
|
||||
|
||||
restServer = RestServerRef.new(
|
||||
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
|
||||
initTAddress(config.apiBindAddress , config.apiPort),
|
||||
bufferSize = (1024 * 64),
|
||||
maxRequestBodySize = int.high)
|
||||
.expect("Should start rest server!")
|
||||
var restServer: RestServerRef = nil
|
||||
|
||||
if config.apiBindAddress.isSome:
|
||||
restServer = RestServerRef
|
||||
.new(
|
||||
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
|
||||
initTAddress(config.apiBindAddress.get(), config.apiPort),
|
||||
bufferSize = (1024 * 64),
|
||||
maxRequestBodySize = int.high,
|
||||
)
|
||||
.expect("Should create rest server!")
|
||||
|
||||
switch.mount(network)
|
||||
|
||||
@ -308,4 +382,5 @@ proc new*(
|
||||
restServer: restServer,
|
||||
repoStore: repoStore,
|
||||
maintenance: maintenance,
|
||||
taskpool: taskpool)
|
||||
taskpool: taskpool,
|
||||
)
|
||||
|
||||
@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -25,15 +25,15 @@ export tables

const
  # Size of blocks for storage / network exchange,
  DefaultBlockSize* = NBytes 1024*64
  DefaultBlockSize* = NBytes 1024 * 64
  DefaultCellSize* = NBytes 2048

  # Proving defaults
  DefaultMaxSlotDepth* = 32
  DefaultMaxSlotDepth* = 32
  DefaultMaxDatasetDepth* = 8
  DefaultBlockDepth* = 5
  DefaultCellElms* = 67
  DefaultSamplesNum* = 5
  DefaultBlockDepth* = 5
  DefaultCellElms* = 67
  DefaultSamplesNum* = 5

  # hashes
  Sha256HashCodec* = multiCodec("sha2-256")
@@ -48,18 +48,10 @@ const
  SlotProvingRootCodec* = multiCodec("codex-proving-root")
  CodexSlotCellCodec* = multiCodec("codex-slot-cell")

  CodexHashesCodecs* = [
    Sha256HashCodec,
    Pos2Bn128SpngCodec,
    Pos2Bn128MrklCodec
  ]
  CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec]

  CodexPrimitivesCodecs* = [
    ManifestCodec,
    DatasetRootCodec,
    BlockCodec,
    SlotRootCodec,
    SlotProvingRootCodec,
    ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
    CodexSlotCellCodec,
  ]

@@ -74,40 +66,34 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
  let
    emptyData: seq[byte] = @[]
    PadHashes = {
      Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
      Sha512HashCodec: ? MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
      Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
      Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
    }.toTable

  var
    table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
  var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()

  for hcodec, mhash in PadHashes.pairs:
    table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure
    table[(CIDv1, hcodec, BlockCodec)] = ?Cid.init(CIDv1, BlockCodec, mhash).mapFailure

  success table

proc emptyCid*(
    version: CidVersion,
    hcodec: MultiCodec,
    dcodec: MultiCodec): ?!Cid =
proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid =
  ## Returns cid representing empty content,
  ## given cid version, hash codec and data codec
  ##

  var
    table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
  var table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]

  once:
    table = ? initEmptyCidTable()
    table = ?initEmptyCidTable()

  table[(version, hcodec, dcodec)].catch

proc emptyDigest*(
    version: CidVersion,
    hcodec: MultiCodec,
    dcodec: MultiCodec): ?!MultiHash =
    version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec
): ?!MultiHash =
  ## Returns hash representing empty content,
  ## given cid version, hash codec and data codec
  ##
  emptyCid(version, hcodec, dcodec)
    .flatMap((cid: Cid) => cid.mhash.mapFailure)

  emptyCid(version, hcodec, dcodec).flatMap((cid: Cid) => cid.mhash.mapFailure)
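A brief usage sketch of the `emptyCid` lookup above (codec constants as defined earlier in the file; `without`/`=?` come from pkg/questionable/results):

```nim
import pkg/questionable/results

# Look up the cached CID for empty content under sha2-256 / codex-block.
without cid =? emptyCid(CIDv1, Sha256HashCodec, BlockCodec), err:
  raiseAssert "no empty CID for this codec combination: " & err.msg
echo cid
```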
codex/conf.nim (707 changed lines)
File diff suppressed because it is too large

codex/contentids_exts.nim (new file, 8 lines)
@@ -0,0 +1,8 @@
const ContentIdsExts = [
  multiCodec("codex-root"),
  multiCodec("codex-manifest"),
  multiCodec("codex-block"),
  multiCodec("codex-slot-root"),
  multiCodec("codex-proving-root"),
  multiCodec("codex-slot-cell"),
]
@@ -2,8 +2,10 @@ import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
import contracts/provider

export requests
export marketplace
export market
export interactions
export provider

@@ -1,13 +1,13 @@
Codex Contracts in Nim
Logos Storage Contracts in Nim
=======================

Nim API for the [Codex smart contracts][1].
Nim API for the [Logos Storage smart contracts][1].

Usage
-----

For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
storage contract, see [Logos Storage Contracts][1].

Smart contract
--------------
@@ -144,5 +144,5 @@ await storage
  .markProofAsMissing(id, period)
```

[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md
@@ -1,26 +1,32 @@
{.push raises: [].}

import std/times
import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures

export clock

logScope:
  topics = "contracts clock"

type
  OnChainClock* = ref object of Clock
    provider: Provider
    subscription: Subscription
    offset: times.Duration
    blockNumber: UInt256
    started: bool
    newBlock: AsyncEvent
type OnChainClock* = ref object of Clock
  provider: Provider
  subscription: Subscription
  offset: times.Duration
  blockNumber: UInt256
  started: bool
  newBlock: AsyncEvent
  trackedFutures: TrackedFutures

proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
  OnChainClock(provider: provider, newBlock: newAsyncEvent())
  OnChainClock(
    provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
  )

proc update(clock: OnChainClock, blck: Block) =
  if number =? blck.number and number > clock.blockNumber:
@@ -28,26 +34,28 @@ proc update(clock: OnChainClock, blck: Block) =
    let computerTime = getTime()
    clock.offset = blockTime - computerTime
    clock.blockNumber = number
    trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
    trace "updated clock",
      blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
    clock.newBlock.fire()

proc update(clock: OnChainClock) {.async.} =
proc update(clock: OnChainClock) {.async: (raises: []).} =
  try:
    if latest =? (await clock.provider.getBlock(BlockTag.latest)):
      clock.update(latest)
  except CancelledError as error:
    raise error
  except CatchableError as error:
    debug "error updating clock: ", error=error.msg
    discard
    debug "error updating clock: ", error = error.msg

method start*(clock: OnChainClock) {.async.} =
  if clock.started:
    return

  proc onBlock(_: Block) =
  proc onBlock(blckResult: ?!Block) =
    if eventError =? blckResult.errorOption:
      error "There was an error in block subscription", msg = eventError.msg
      return

    # ignore block parameter; hardhat may call this with pending blocks
    asyncSpawn clock.update()
    clock.trackedFutures.track(clock.update())

  await clock.update()

@@ -59,13 +67,16 @@ method stop*(clock: OnChainClock) {.async.} =
    return

  await clock.subscription.unsubscribe()
  await clock.trackedFutures.cancelTracked()
  clock.started = false

method now*(clock: OnChainClock): SecondsSince1970 =
  doAssert clock.started, "clock should be started before calling now()"
  return toUnix(getTime() + clock.offset)

method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
method waitUntil*(
    clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
  while (let difference = time - clock.now(); difference > 0):
    clock.newBlock.clear()
    discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))
@@ -1,52 +1,71 @@
import pkg/contractabi
import pkg/ethers/fields
import pkg/ethers/contracts/fields
import pkg/questionable/results

export contractabi

const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64

type
  MarketplaceConfig* = object
    collateral*: CollateralConfig
    proofs*: ProofConfig
    reservations*: SlotReservationsConfig
    requestDurationLimit*: uint64

  CollateralConfig* = object
    repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed
    repairRewardPercentage*: uint8
      # percentage of remaining collateral slot has after it has been freed
    maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
    slashCriterion*: uint16 # amount of proofs missed that lead to slashing
    slashPercentage*: uint8 # percentage of the collateral that is slashed
    validatorRewardPercentage*: uint8
      # percentage of the slashed amount going to the validators

  ProofConfig* = object
    period*: UInt256 # proofs requirements are calculated per period (in seconds)
    timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
    period*: uint64 # proofs requirements are calculated per period (in seconds)
    timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
    downtime*: uint8 # ignore this much recent blocks for proof requirements
    downtimeProduct*: uint8
    zkeyHash*: string # hash of the zkey file which is linked to the verifier
    # Ensures the pointer does not remain in downtime for many consecutive
    # periods. For each period increase, move the pointer `pointerProduct`
    # blocks. Should be a prime number to ensure there are no cycles.
    downtimeProduct*: uint8

  SlotReservationsConfig* = object
    maxReservations*: uint8

func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
  ProofConfig(
    period: tupl[0],
    timeout: tupl[1],
    downtime: tupl[2],
    zkeyHash: tupl[3],
    downtimeProduct: tupl[4]
    downtimeProduct: tupl[3],
    zkeyHash: tupl[4],
  )

func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
  SlotReservationsConfig(maxReservations: tupl[0])

func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
  CollateralConfig(
    repairRewardPercentage: tupl[0],
    maxNumberOfSlashes: tupl[1],
    slashCriterion: tupl[2],
    slashPercentage: tupl[3]
    slashPercentage: tupl[2],
    validatorRewardPercentage: tupl[3],
  )

func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
  MarketplaceConfig(
    collateral: tupl[0],
    proofs: tupl[1]
    proofs: tupl[1],
    reservations: tupl[2],
    requestDurationLimit: tupl[3],
  )

func solidityType*(_: type SlotReservationsConfig): string =
  solidityType(SlotReservationsConfig.fieldTypes)

func solidityType*(_: type ProofConfig): string =
  solidityType(ProofConfig.fieldTypes)

@@ -54,7 +73,10 @@ func solidityType*(_: type CollateralConfig): string =
  solidityType(CollateralConfig.fieldTypes)

func solidityType*(_: type MarketplaceConfig): string =
  solidityType(CollateralConfig.fieldTypes)
  solidityType(MarketplaceConfig.fieldTypes)

func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
  encoder.write(slot.fieldValues)

func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
  encoder.write(slot.fieldValues)
@@ -69,6 +91,10 @@ func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
  let tupl = ?decoder.read(ProofConfig.fieldTypes)
  success ProofConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
  let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
  success SlotReservationsConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
  let tupl = ?decoder.read(CollateralConfig.fieldTypes)
  success CollateralConfig.fromTuple(tupl)
@@ -9,38 +9,42 @@ import ./marketplace

type Deployment* = ref object
  provider: Provider
  config: CodexConf
  marketplaceAddressOverride: ?Address

const knownAddresses = {
  # Hardhat localhost network
  "31337": {
    "Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"),
  }.toTable,
  # Taiko Alpha-3 Testnet
  "167005": {
    "Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")
  }.toTable,
  # Codex Testnet - Oct 21 2024 07:31:50 AM (+00:00 UTC)
  "789987": {
    "Marketplace": Address.init("0x3F9Cf3F40F0e87d804B776D8403e3d29F85211f4")
  }.toTable
  # Hardhat localhost network
  "31337":
    {"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
  # Taiko Alpha-3 Testnet
  "167005":
    {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
  # Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
  "789987":
    {"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
  # Linea (Status)
  "1660990954":
    {"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable

proc getKnownAddress(T: type, chainId: UInt256): ?Address =
  let id = chainId.toString(10)
  notice "Looking for well-known contract address with ChainID ", chainId=id
  notice "Looking for well-known contract address with ChainID ", chainId = id

  if not (id in knownAddresses):
    return none Address

  return knownAddresses[id].getOrDefault($T, Address.none)

proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
  Deployment(provider: provider, config: config)
proc new*(
    _: type Deployment,
    provider: Provider,
    marketplaceAddressOverride: ?Address = none Address,
): Deployment =
  Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)

proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
  when contract is Marketplace:
    if address =? deployment.config.marketplaceAddress:
    if address =? deployment.marketplaceAddressOverride:
      return some address

  let chainId = await deployment.provider.getChainId()
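A short usage sketch of the lookup above (`provider` is a hypothetical connected Provider; `without`/`=?` come from pkg/questionable):

```nim
# With no override, the address is resolved from knownAddresses by chain ID.
let deployment = Deployment.new(provider)
without marketplaceAddress =? await deployment.address(Marketplace):
  raiseAssert "no known Marketplace address for this network"
```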
@@ -9,13 +9,12 @@ import ./interactions
export purchasing
export logutils

type
  ClientInteractions* = ref object of ContractInteractions
    purchasing*: Purchasing
type ClientInteractions* = ref object of ContractInteractions
  purchasing*: Purchasing

proc new*(_: type ClientInteractions,
          clock: OnChainClock,
          purchasing: Purchasing): ClientInteractions =
proc new*(
    _: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
  ClientInteractions(clock: clock, purchasing: purchasing)

proc start*(self: ClientInteractions) {.async.} =

@@ -7,15 +7,10 @@ import ./interactions
export sales
export logutils

type
  HostInteractions* = ref object of ContractInteractions
    sales*: Sales
type HostInteractions* = ref object of ContractInteractions
  sales*: Sales

proc new*(
    _: type HostInteractions,
    clock: Clock,
    sales: Sales
): HostInteractions =
proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
  ## Create a new HostInteractions instance
  ##
  HostInteractions(clock: clock, sales: sales)

@@ -5,9 +5,8 @@ import ../market

export clock

type
  ContractInteractions* = ref object of RootObj
    clock*: Clock
type ContractInteractions* = ref object of RootObj
  clock*: Clock

method start*(self: ContractInteractions) {.async, base.} =
  discard

@@ -3,13 +3,12 @@ import ../../validation

export validation

type
  ValidatorInteractions* = ref object of ContractInteractions
    validation: Validation
type ValidatorInteractions* = ref object of ContractInteractions
  validation: Validation

proc new*(_: type ValidatorInteractions,
          clock: OnChainClock,
          validation: Validation): ValidatorInteractions =
proc new*(
    _: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
  ValidatorInteractions(clock: clock, validation: validation)

proc start*(self: ValidatorInteractions) {.async.} =

@@ -1,14 +1,14 @@
import std/sequtils
import std/strformat
import std/strutils
import std/sugar
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
import ../logutils
import ../market
import ./marketplace
import ./proofs
import ./provider

export market
@@ -20,128 +20,225 @@ type
    contract: Marketplace
    signer: Signer
    rewardRecipient: ?Address
    configuration: ?MarketplaceConfig
    requestCache: LruCache[string, StorageRequest]
    allowanceLock: AsyncLock

  MarketSubscription = market.Subscription
  EventSubscription = ethers.Subscription
  OnChainMarketSubscription = ref object of MarketSubscription
    eventSubscription: EventSubscription

func new*(
    _: type OnChainMarket,
    contract: Marketplace,
    rewardRecipient = Address.none): OnChainMarket =

    _: type OnChainMarket,
    contract: Marketplace,
    rewardRecipient = Address.none,
    requestCacheSize: uint16 = DefaultRequestCacheSize,
): OnChainMarket =
  without signer =? contract.signer:
    raiseAssert("Marketplace contract should have a signer")

  var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))

  OnChainMarket(
    contract: contract,
    signer: signer,
    rewardRecipient: rewardRecipient
    rewardRecipient: rewardRecipient,
    requestCache: requestCache,
  )

proc raiseMarketError(message: string) {.raises: [MarketError].} =
  raise newException(MarketError, message)

template convertEthersError(body) =
func prefixWith(suffix, prefix: string, separator = ": "): string =
  if prefix.len > 0:
    return &"{prefix}{separator}{suffix}"
  else:
    return suffix

template convertEthersError(msg: string = "", body) =
  try:
    body
  except EthersError as error:
    raiseMarketError(error.msgDetail)
    raiseMarketError(error.msgDetail.prefixWith(msg))

proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
proc config(
    market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
  without resolvedConfig =? market.configuration:
    if err =? (await market.loadConfig()).errorOption:
      raiseMarketError(err.msg)

    without config =? market.configuration:
      raiseMarketError("Failed to access to config from the Marketplace contract")

    return config

  return resolvedConfig

template withAllowanceLock*(market: OnChainMarket, body: untyped) =
  if market.allowanceLock.isNil:
    market.allowanceLock = newAsyncLock()
  await market.allowanceLock.acquire()
  try:
    body
  finally:
    try:
      market.allowanceLock.release()
    except AsyncLockError as error:
      raise newException(Defect, error.msg, error)

proc approveFunds(
    market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
  debug "Approving tokens", amount
  convertEthersError:
  convertEthersError("Failed to approve funds"):
    let tokenAddress = await market.contract.token()
    let token = Erc20Token.new(tokenAddress, market.signer)
    discard await token.increaseAllowance(market.contract.address(), amount).confirm(0)
    let owner = await market.signer.getAddress()
    let spender = market.contract.address
    market.withAllowanceLock:
      let allowance = await token.allowance(owner, spender)
      discard await token.approve(spender, allowance + amount).confirm(1)
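The lock in this hunk guards a read-modify-write: without it, two concurrent approvals could both read the same allowance and the later `approve` would overwrite the earlier increase. A minimal sketch of reusing the template for any allowance mutation (`token`, `owner`, `spender`, and `required` are hypothetical values for illustration):

```nim
market.withAllowanceLock:
  # Serialized with other allowance updates on this market instance.
  let current = await token.allowance(owner, spender)
  if current < required:
    discard await token.approve(spender, required).confirm(1)
```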
method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
  let config = await market.contract.config()
method loadConfig*(
    market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
  try:
    without config =? market.configuration:
      let fetchedConfig = await market.contract.configuration()

      market.configuration = some fetchedConfig

    return success()
  except EthersError as err:
    return failure newException(
      MarketError,
      "Failed to fetch the config from the Marketplace contract: " & err.msg,
    )

method getZkeyHash*(
    market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
  let config = await market.config()
  return some config.proofs.zkeyHash

method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
  convertEthersError:
method getSigner*(
    market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to get signer address"):
    return await market.signer.getAddress()

method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
  convertEthersError:
    let config = await market.contract.config()
method periodicity*(
    market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to get Marketplace config"):
    let config = await market.config()
    let period = config.proofs.period
    return Periodicity(seconds: period)

method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} =
  convertEthersError:
    let config = await market.contract.config()
method proofTimeout*(
    market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to get Marketplace config"):
    let config = await market.config()
    return config.proofs.timeout

method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
  convertEthersError:
    let config = await market.contract.config()
method repairRewardPercentage*(
    market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to get Marketplace config"):
    let config = await market.config()
    return config.collateral.repairRewardPercentage

method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
  convertEthersError("Failed to get Marketplace config"):
    let config = await market.config()
    return config.requestDurationLimit

method proofDowntime*(
    market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to get Marketplace config"):
    let config = await market.config()
    return config.proofs.downtime

method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
  convertEthersError:
  convertEthersError("Failed to get slot pointer"):
    let overrides = CallOverrides(blockTag: some BlockTag.pending)
    return await market.contract.getPointer(slotId, overrides)

method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
  convertEthersError:
  convertEthersError("Failed to get my requests"):
    return await market.contract.myRequests

method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
  convertEthersError:
  convertEthersError("Failed to get my slots"):
    let slots = await market.contract.mySlots()
    debug "Fetched my slots", numSlots=len(slots)
    debug "Fetched my slots", numSlots = len(slots)

    return slots

method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} =
  convertEthersError:
method requestStorage(
    market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to request storage"):
    debug "Requesting storage"
    await market.approveFunds(request.price())
    discard await market.contract.requestStorage(request).confirm(0)
    await market.approveFunds(request.totalPrice())
    discard await market.contract.requestStorage(request).confirm(1)

method getRequest(market: OnChainMarket,
                  id: RequestId): Future[?StorageRequest] {.async.} =
  convertEthersError:
    try:
      return some await market.contract.getRequest(id)
    except ProviderError as e:
      if e.msgDetail.contains("Unknown request"):
        return none StorageRequest
      raise e
method getRequest*(
    market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
  try:
    let key = $id

method requestState*(market: OnChainMarket,
                     requestId: RequestId): Future[?RequestState] {.async.} =
  convertEthersError:
    if key in market.requestCache:
      return some market.requestCache[key]

    let request = await market.contract.getRequest(id)
    market.requestCache[key] = request
    return some request
  except Marketplace_UnknownRequest, KeyError:
    warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
    return none StorageRequest
  except EthersError as e:
    error "Cannot retrieve the request", error = e.msg
    return none StorageRequest

method requestState*(
    market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
  convertEthersError("Failed to get request state"):
    try:
      let overrides = CallOverrides(blockTag: some BlockTag.pending)
      return some await market.contract.requestState(requestId, overrides)
    except ProviderError as e:
      if e.msgDetail.contains("Unknown request"):
        return none RequestState
      raise e
    except Marketplace_UnknownRequest:
      return none RequestState

method slotState*(market: OnChainMarket,
                  slotId: SlotId): Future[SlotState] {.async.} =
  convertEthersError:
method slotState*(
    market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
    let overrides = CallOverrides(blockTag: some BlockTag.pending)
    return await market.contract.slotState(slotId, overrides)

method getRequestEnd*(market: OnChainMarket,
                      id: RequestId): Future[SecondsSince1970] {.async.} =
  convertEthersError:
method getRequestEnd*(
    market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
  convertEthersError("Failed to get request end"):
    return await market.contract.requestEnd(id)

method requestExpiresAt*(market: OnChainMarket,
                         id: RequestId): Future[SecondsSince1970] {.async.} =
  convertEthersError:
method requestExpiresAt*(
    market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
  convertEthersError("Failed to get request expiry"):
    return await market.contract.requestExpiry(id)

method getHost(market: OnChainMarket,
               requestId: RequestId,
               slotIndex: UInt256): Future[?Address] {.async.} =
  convertEthersError:
method getHost(
    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to get slot's host"):
    let slotId = slotId(requestId, slotIndex)
    let address = await market.contract.getHost(slotId)
    if address != Address.default:
@@ -149,266 +246,435 @@ method getHost(market: OnChainMarket,
    else:
      return none Address
method getActiveSlot*(market: OnChainMarket,
                      slotId: SlotId): Future[?Slot] {.async.} =
  convertEthersError:
method currentCollateral*(
    market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
  convertEthersError("Failed to get slot's current collateral"):
    return await market.contract.currentCollateral(slotId)

method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
  convertEthersError("Failed to get active slot"):
    try:
      return some await market.contract.getActiveSlot(slotId)
    except ProviderError as e:
      if e.msgDetail.contains("Slot is free"):
        return none Slot
      raise e
    except Marketplace_SlotIsFree:
      return none Slot

method fillSlot(market: OnChainMarket,
                requestId: RequestId,
                slotIndex: UInt256,
                proof: Groth16Proof,
                collateral: UInt256) {.async.} =
  convertEthersError:
    await market.approveFunds(collateral)
    discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(0)
method fillSlot(
    market: OnChainMarket,
    requestId: RequestId,
    slotIndex: uint64,
    proof: Groth16Proof,
    collateral: UInt256,
) {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to fill slot"):
    logScope:
      requestId
      slotIndex

method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
  convertEthersError:
    var freeSlot: Future[?TransactionResponse]
    if rewardRecipient =? market.rewardRecipient:
      # If --reward-recipient specified, use it as the reward recipient, and use
      # the SP's address as the collateral recipient
      let collateralRecipient = await market.getSigner()
      freeSlot = market.contract.freeSlot(
        slotId,
        rewardRecipient, # --reward-recipient
        collateralRecipient) # SP's address
    try:
      await market.approveFunds(collateral)

    else:
      # Otherwise, use the SP's address as both the reward and collateral
      # recipient (the contract will use msg.sender for both)
      freeSlot = market.contract.freeSlot(slotId)
      # Add 10% to gas estimate to deal with different evm code flow when we
      # happen to be the last one to fill a slot in this request
      trace "estimating gas for fillSlot"
      let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
      let gasLimit = (gas * 110) div 100
      let overrides = TransactionOverrides(gasLimit: some gasLimit)

    discard await freeSlot.confirm(0)
      trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
      discard await market.contract
        .fillSlot(requestId, slotIndex, proof, overrides)
        .confirm(1)
      trace "fillSlot transaction completed"
    except Marketplace_SlotNotFree as parent:
      raise newException(
        SlotStateMismatchError, "Failed to fill slot because the slot is not free",
        parent,
      )

method freeSlot*(
    market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to free slot"):
    try:
      var freeSlot: Future[Confirmable]
      if rewardRecipient =? market.rewardRecipient:
        # If --reward-recipient specified, use it as the reward recipient, and use
        # the SP's address as the collateral recipient
        let collateralRecipient = await market.getSigner()

method withdrawFunds(market: OnChainMarket,
                     requestId: RequestId) {.async.} =
  convertEthersError:
    discard await market.contract.withdrawFunds(requestId).confirm(0)
        # Add 200% to gas estimate to deal with different evm code flow when we
        # happen to be the one to make the request fail
        let gas = await market.contract.estimateGas.freeSlot(
          slotId, rewardRecipient, collateralRecipient
        )
        let gasLimit = gas * 3
        let overrides = TransactionOverrides(gasLimit: some gasLimit)

method isProofRequired*(market: OnChainMarket,
                        id: SlotId): Future[bool] {.async.} =
  convertEthersError:
        trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit

        freeSlot = market.contract.freeSlot(
          slotId,
          rewardRecipient, # --reward-recipient
          collateralRecipient, # SP's address
          overrides,
        )
      else:
        # Otherwise, use the SP's address as both the reward and collateral
        # recipient (the contract will use msg.sender for both)

        # Add 200% to gas estimate to deal with different evm code flow when we
        # happen to be the one to make the request fail
        let gas = await market.contract.estimateGas.freeSlot(slotId)
        let gasLimit = gas * 3
        let overrides = TransactionOverrides(gasLimit: some (gasLimit))

        trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit

        freeSlot = market.contract.freeSlot(slotId, overrides)

      discard await freeSlot.confirm(1)
    except Marketplace_SlotIsFree as parent:
      raise newException(
        SlotStateMismatchError, "Failed to free slot, slot is already free", parent
      )

method withdrawFunds(
    market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to withdraw funds"):
    discard await market.contract.withdrawFunds(requestId).confirm(1)

method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
  convertEthersError("Failed to get proof requirement"):
    try:
      let overrides = CallOverrides(blockTag: some BlockTag.pending)
      return await market.contract.isProofRequired(id, overrides)
    except ProviderError as e:
      if e.msgDetail.contains("Slot is free"):
        return false
      raise e
    except Marketplace_SlotIsFree:
      return false

method willProofBeRequired*(market: OnChainMarket,
                            id: SlotId): Future[bool] {.async.} =
  convertEthersError:
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
  convertEthersError("Failed to get future proof requirement"):
    try:
      let overrides = CallOverrides(blockTag: some BlockTag.pending)
      return await market.contract.willProofBeRequired(id, overrides)
    except ProviderError as e:
      if e.msgDetail.contains("Slot is free"):
        return false
      raise e
    except Marketplace_SlotIsFree:
      return false

method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} =
  convertEthersError:
method getChallenge*(
    market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
  convertEthersError("Failed to get proof challenge"):
    let overrides = CallOverrides(blockTag: some BlockTag.pending)
    return await market.contract.getChallenge(id, overrides)

method submitProof*(market: OnChainMarket,
                    id: SlotId,
                    proof: Groth16Proof) {.async.} =
  convertEthersError:
    discard await market.contract.submitProof(id, proof).confirm(0)
method submitProof*(
    market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to submit proof"):
    try:
      discard await market.contract.submitProof(id, proof).confirm(1)
    except Proofs_InvalidProof as parent:
      raise newException(
        ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
      )

method markProofAsMissing*(market: OnChainMarket,
                           id: SlotId,
                           period: Period) {.async.} =
  convertEthersError:
    discard await market.contract.markProofAsMissing(id, period).confirm(0)
method markProofAsMissing*(
    market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} =
  convertEthersError("Failed to mark proof as missing"):
    # Add 50% to gas estimate to deal with different evm code flow when we
    # happen to be the one to make the request fail
    let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
    let gasLimit = (gas * 150) div 100
    let overrides = TransactionOverrides(gasLimit: some gasLimit)

method canProofBeMarkedAsMissing*(
    market: OnChainMarket,
    id: SlotId,
    period: Period
): Future[bool] {.async.} =
  let provider = market.contract.provider
  let contractWithoutSigner = market.contract.connect(provider)
  let overrides = CallOverrides(blockTag: some BlockTag.pending)
    trace "calling markProofAsMissing on contract",
      estimatedGas = gas, gasLimit = gasLimit

    discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
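For concreteness, the gas headroom applied in these calls is plain integer arithmetic; with a hypothetical estimate of 100_000 gas:

```nim
let gas = 100_000'u64
assert (gas * 110) div 100 == 110_000 # fillSlot: +10%
assert (gas * 150) div 100 == 150_000 # markProofAsMissing: +50%
assert gas * 3 == 300_000 # freeSlot: +200%
```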
method canMarkProofAsMissing*(
|
||||
market: OnChainMarket, id: SlotId, period: Period
|
||||
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
|
||||
return true
|
||||
except EthersError as e:
|
||||
trace "Proof cannot be marked as missing", msg = e.msg
|
||||
return false
|
||||
|
||||
method reserveSlot*(
|
||||
market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256) {.async.} =
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to reserve slot"):
|
||||
try:
|
||||
# Add 25% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the last one that is allowed to reserve the slot
|
||||
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
|
||||
let gasLimit = (gas * 125) div 100
|
||||
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||
|
||||
convertEthersError:
|
||||
discard await market.contract.reserveSlot(requestId, slotIndex).confirm(0)
|
||||
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||
|
||||
discard
|
||||
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
|
||||
except SlotReservations_ReservationNotAllowed:
|
||||
raise newException(
|
||||
SlotReservationNotAllowedError,
|
||||
"Failed to reserve slot because reservation is not allowed",
|
||||
)
|
||||
|
||||
method canReserveSlot*(
|
||||
market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256): Future[bool] {.async.} =
|
||||
|
||||
convertEthersError:
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[bool] {.async.} =
|
||||
convertEthersError("Unable to determine if slot can be reserved"):
|
||||
return await market.contract.canReserveSlot(requestId, slotIndex)

method subscribeRequests*(
    market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in Request subscription", msg = eventErr.msg
      return

    callback(event.requestId, event.ask, event.expiry)

  convertEthersError("Failed to subscribe to StorageRequested events"):
    let subscription = await market.contract.subscribe(StorageRequested, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
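
# A minimal usage sketch for the subscription API (hypothetical caller and
# callback signature, assuming an existing `market: OnChainMarket`): the
# callback fires once per decoded event, and the returned subscription is
# later cancelled with `unsubscribe`.
#
#   proc onRequest(id: RequestId, ask: StorageAsk, expiry: uint64) =
#     trace "storage requested", requestId = id
#
#   let sub = await market.subscribeRequests(onRequest)
#   # ... when done:
#   await sub.unsubscribe()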

method subscribeSlotFilled*(
    market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in SlotFilled subscription", msg = eventErr.msg
      return

    callback(event.requestId, event.slotIndex)

  convertEthersError("Failed to subscribe to SlotFilled events"):
    let subscription = await market.contract.subscribe(SlotFilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeSlotFilled*(
    market: OnChainMarket,
    requestId: RequestId,
    slotIndex: uint64,
    callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
  proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
    if eventRequestId == requestId and eventSlotIndex == slotIndex:
      callback(requestId, slotIndex)

  convertEthersError("Failed to subscribe to SlotFilled events"):
    return await market.subscribeSlotFilled(onSlotFilled)

method subscribeSlotFreed*(
    market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in SlotFreed subscription", msg = eventErr.msg
      return

    callback(event.requestId, event.slotIndex)

  convertEthersError("Failed to subscribe to SlotFreed events"):
    let subscription = await market.contract.subscribe(SlotFreed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeSlotReservationsFull*(
    market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in SlotReservationsFull subscription",
        msg = eventErr.msg
      return

    callback(event.requestId, event.slotIndex)

  convertEthersError("Failed to subscribe to SlotReservationsFull events"):
    let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeFulfillment(
    market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
      return

    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFulfilled events"):
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeFulfillment(
    market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
      return

    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFulfilled events"):
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestCancelled*(
    market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return

    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestCancelled events"):
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestCancelled*(
    market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return

    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestCancelled events"):
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestFailed*(
    market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return

    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFailed events"):
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestFailed*(
    market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return

    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFailed events"):
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeProofSubmission*(
    market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
      return

    callback(event.id)

  convertEthersError("Failed to subscribe to ProofSubmitted events"):
    let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
  await subscription.eventSubscription.unsubscribe()

method queryPastSlotFilledEvents*(
    market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
  convertEthersError("Failed to get past SlotFilled events from block"):
    return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)

method queryPastSlotFilledEvents*(
    market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
  convertEthersError("Failed to get past SlotFilled events"):
    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

    return await market.queryPastSlotFilledEvents(fromBlock)

method queryPastSlotFilledEvents*(
    market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
  convertEthersError("Failed to get past SlotFilled events from time"):
    let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
    return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))

method queryPastStorageRequestedEvents*(
    market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
  convertEthersError("Failed to get past StorageRequested events from block"):
    return
      await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)

method queryPastStorageRequestedEvents*(
    market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
  convertEthersError("Failed to get past StorageRequested events"):
    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

    return await market.queryPastStorageRequestedEvents(fromBlock)
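
# Example lookups (hypothetical values): the overloads accept a block tag, a
# block depth, or a unix timestamp, and all of them funnel into the BlockTag
# variant above.
#
#   let byDepth = await market.queryPastSlotFilledEvents(5000)
#   let byTime = await market.queryPastSlotFilledEvents(someEpoch - 3600)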

method slotCollateral*(
    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
  let slotid = slotId(requestId, slotIndex)

  try:
    let slotState = await market.slotState(slotid)

    without request =? await market.getRequest(requestId):
      return failure newException(
        MarketError, "Failure calculating the slotCollateral, cannot get the request"
      )

    return market.slotCollateral(request.ask.collateralPerSlot, slotState)
  except MarketError as error:
    error "Error when trying to calculate the slotCollateral", error = error.msg
    return failure error

method slotCollateral*(
    market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
  if slotState == SlotState.Repair:
    without repairRewardPercentage =?
      market.configuration .? collateral .? repairRewardPercentage:
      return failure newException(
        MarketError,
        "Failure calculating the slotCollateral, cannot get the reward percentage",
      )

    return success (
      collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
        100.u256
      )
    )

  return success(collateralPerSlot)
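
# Worked example of the Repair discount above (illustrative numbers): with a
# hypothetical repairRewardPercentage of 10 and collateralPerSlot of 1000, the
# required collateral is 1000 - (1000 * 10) div 100 = 900, so a host filling a
# slot in Repair state posts less collateral by exactly the repair reward
# percentage.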

@@ -17,40 +17,182 @@ export requests
type
  Marketplace* = ref object of Contract
  Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
  Marketplace_SlashPercentageTooHigh* = object of SolidityError
  Marketplace_MaximumSlashingTooHigh* = object of SolidityError
  Marketplace_InvalidExpiry* = object of SolidityError
  Marketplace_InvalidMaxSlotLoss* = object of SolidityError
  Marketplace_InsufficientSlots* = object of SolidityError
  Marketplace_InvalidClientAddress* = object of SolidityError
  Marketplace_RequestAlreadyExists* = object of SolidityError
  Marketplace_InvalidSlot* = object of SolidityError
  Marketplace_SlotNotFree* = object of SolidityError
  Marketplace_InvalidSlotHost* = object of SolidityError
  Marketplace_AlreadyPaid* = object of SolidityError
  Marketplace_TransferFailed* = object of SolidityError
  Marketplace_UnknownRequest* = object of SolidityError
  Marketplace_InvalidState* = object of SolidityError
  Marketplace_StartNotBeforeExpiry* = object of SolidityError
  Marketplace_SlotNotAcceptingProofs* = object of SolidityError
  Marketplace_SlotIsFree* = object of SolidityError
  Marketplace_ReservationRequired* = object of SolidityError
  Marketplace_NothingToWithdraw* = object of SolidityError
  Marketplace_InsufficientDuration* = object of SolidityError
  Marketplace_InsufficientProofProbability* = object of SolidityError
  Marketplace_InsufficientCollateral* = object of SolidityError
  Marketplace_InsufficientReward* = object of SolidityError
  Marketplace_InvalidCid* = object of SolidityError
  Marketplace_DurationExceedsLimit* = object of SolidityError
  Proofs_InsufficientBlockHeight* = object of SolidityError
  Proofs_InvalidProof* = object of SolidityError
  Proofs_ProofAlreadySubmitted* = object of SolidityError
  Proofs_PeriodNotEnded* = object of SolidityError
  Proofs_ValidationTimedOut* = object of SolidityError
  Proofs_ProofNotMissing* = object of SolidityError
  Proofs_ProofNotRequired* = object of SolidityError
  Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
  Periods_InvalidSecondsPerPeriod* = object of SolidityError
  SlotReservations_ReservationNotAllowed* = object of SolidityError

proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
proc currentCollateral*(
  marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}

proc requestStorage*(
  marketplace: Marketplace, request: StorageRequest
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
    Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
    Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
    Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
    Marketplace_InsufficientReward, Marketplace_InvalidCid,
  ]
.}

proc fillSlot*(
  marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
  ]
.}

proc withdrawFunds*(
  marketplace: Marketplace, requestId: RequestId
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
  ]
.}

proc withdrawFunds*(
  marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
  ]
.}

proc freeSlot*(
  marketplace: Marketplace, id: SlotId
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
  ]
.}

proc freeSlot*(
  marketplace: Marketplace,
  id: SlotId,
  rewardRecipient: Address,
  collateralRecipient: Address,
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
  ]
.}

proc getRequest*(
  marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}

proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(
  marketplace: Marketplace, id: SlotId
): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}

proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(
  marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}

proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(
  marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}

proc requestExpiry*(
  marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}

proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}
proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(
  marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}

proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}

proc submitProof*(
  marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.
  contract,
  errors:
    [Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
.}

proc markProofAsMissing*(
  marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
  contract,
  errors: [
    Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
    Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
    Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
  ]
.}

proc canMarkProofAsMissing*(
  marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
  contract,
  errors: [
    Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
    Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
    Proofs_ProofAlreadyMarkedMissing,
  ]
.}

proc reserveSlot*(
  marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}

proc canReserveSlot*(
  marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}

@@ -1,19 +1,22 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/contracts/fields

type
  Groth16Proof* = object
    a*: G1Point
    b*: G2Point
    c*: G1Point

  G1Point* = object
    x*: UInt256
    y*: UInt256

  # A field element F_{p^2} encoded as `real + i * imag`
  Fp2Element* = object
    real*: UInt256
    imag*: UInt256

  G2Point* = object
    x*: Fp2Element
    y*: Fp2Element
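
# For instance, the F_{p^2} element 3 + 5i would be represented as
# Fp2Element(real: 3.u256, imag: 5.u256), and each G2Point coordinate in a
# proof is one such pair.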

codex/contracts/provider.nim (new file)
@@ -0,0 +1,123 @@

import pkg/ethers/provider
import pkg/chronos
import pkg/questionable

import ../logutils

from ../clock import SecondsSince1970

logScope:
  topics = "marketplace onchain provider"

proc raiseProviderError(message: string) {.raises: [ProviderError].} =
  raise newException(ProviderError, message)

proc blockNumberAndTimestamp*(
    provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
  without latestBlock =? await provider.getBlock(blockTag):
    raiseProviderError("Could not get latest block")

  without latestBlockNumber =? latestBlock.number:
    raiseProviderError("Could not get latest block number")

  return (latestBlockNumber, latestBlock.timestamp)

proc binarySearchFindClosestBlock(
    provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
  let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
  if abs(lowTimestamp.truncate(int) - epochTime) <
      abs(highTimestamp.truncate(int) - epochTime):
    return low
  else:
    return high

proc binarySearchBlockNumberForEpoch(
    provider: Provider,
    epochTime: UInt256,
    latestBlockNumber: UInt256,
    earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  var low = earliestBlockNumber
  var high = latestBlockNumber

  while low <= high:
    if low == 0 and high == 0:
      return low
    let mid = (low + high) div 2
    let (midBlockNumber, midBlockTimestamp) =
      await provider.blockNumberAndTimestamp(BlockTag.init(mid))

    if midBlockTimestamp < epochTime:
      low = mid + 1
    elif midBlockTimestamp > epochTime:
      high = mid - 1
    else:
      return midBlockNumber
  # NOTICE that by how the binary search is implemented, when it finishes
  # low is always greater than high - this is why we use high, where
  # intuitively we would use low:
  await provider.binarySearchFindClosestBlock(
    epochTime.truncate(int), low = high, high = low
  )
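
# Worked example of the low/high crossing above (illustrative numbers): if
# blocks 5 and 6 have timestamps 100 and 110 and we search for epoch time 105,
# the loop ends with high = 5 and low = 6, and the closest-block helper then
# compares the two timestamps and returns whichever block is nearer to 105.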

proc blockNumberForEpoch*(
    provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  let epochTimeUInt256 = epochTime.u256
  let (latestBlockNumber, latestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.latest)
  let (earliestBlockNumber, earliestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.earliest)

  # Initially we used the average block time to predict
  # the number of blocks we need to look back in order to find
  # the block number corresponding to the given epoch time.
  # This estimation can be highly inaccurate if block time
  # was changing in the past or is fluctuating and therefore
  # we used that information initially only to find out
  # if the available history is long enough to perform an effective search.
  # It turns out we do not have to do that. There is an easier way.
  #
  # First we check if the given epoch time equals the timestamp of either
  # the earliest or the latest block. If it does, we just return the
  # block number of that block.
  #
  # Otherwise, if the earliest available block is not the genesis block,
  # we should check the timestamp of that earliest block and if it is greater
  # than the epoch time, we should issue a warning and return
  # that earliest block number.
  # In all other cases, thus when the earliest block is not the genesis
  # block but its timestamp is not greater than the requested epoch time, or
  # if the earliest available block is the genesis block
  # (which means we have the whole history available), we should proceed with
  # the binary search.
  #
  # An additional benefit of this method is that we do not have to rely
  # on the average block time, which not only makes the whole thing
  # more reliable, but also easier to test.

  # Are we lucky today?
  if earliestBlockTimestamp == epochTimeUInt256:
    return earliestBlockNumber
  if latestBlockTimestamp == epochTimeUInt256:
    return latestBlockNumber

  if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
    let availableHistoryInDays =
      (latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
    warn "Short block history detected.",
      earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
    return earliestBlockNumber

  return await provider.binarySearchBlockNumberForEpoch(
    epochTimeUInt256, latestBlockNumber, earliestBlockNumber
  )

proc pastBlockTag*(
    provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
  let head = await provider.getBlockNumber()
  return BlockTag.init(head - blocksAgo.abs.u256)
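
# A minimal usage sketch (hypothetical values, assuming a connected
# `provider: Provider`):
#
#   let blockNumber = await provider.blockNumberForEpoch(1_700_000_000)
#   let fromTag = await provider.pastBlockTag(5000) # 5000 blocks before head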

@@ -2,13 +2,14 @@ import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
from ../errors import mapFailure

export contractabi

@@ -17,22 +18,26 @@ type
    client* {.serialize.}: Address
    ask* {.serialize.}: StorageAsk
    content* {.serialize.}: StorageContent
    expiry* {.serialize.}: uint64
    nonce*: Nonce

  StorageAsk* = object
    proofProbability* {.serialize.}: UInt256
    pricePerBytePerSecond* {.serialize.}: UInt256
    collateralPerByte* {.serialize.}: UInt256
    slots* {.serialize.}: uint64
    slotSize* {.serialize.}: uint64
    duration* {.serialize.}: uint64
    maxSlotLoss* {.serialize.}: uint64

  StorageContent* = object
    cid* {.serialize.}: Cid
    merkleRoot*: array[32, byte]

  Slot* = object
    request* {.serialize.}: StorageRequest
    slotIndex* {.serialize.}: uint64

  SlotId* = distinct array[32, byte]
  RequestId* = distinct array[32, byte]
  Nonce* = distinct array[32, byte]

@@ -42,6 +47,7 @@ type
    Cancelled
    Finished
    Failed

  SlotState* {.pure.} = enum
    Free
    Filled
@@ -49,6 +55,7 @@ type
    Failed
    Paid
    Cancelled
    Repair

proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}

@@ -80,44 +87,43 @@ proc toHex*[T: distinct](id: T): string =
  type baseType = T.distinctBase
  baseType(id).toHex

logutils.formatIt(LogFormat.textLines, Nonce):
  it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
  it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
  it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
  it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
  it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
  it.to0xHexLog

func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
  StorageRequest(
    client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
  )

func fromTuple(_: type Slot, tupl: tuple): Slot =
  Slot(request: tupl[0], slotIndex: tupl[1])

func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
  StorageAsk(
    proofProbability: tupl[0],
    pricePerBytePerSecond: tupl[1],
    collateralPerByte: tupl[2],
    slots: tupl[3],
    slotSize: tupl[4],
    duration: tupl[5],
    maxSlotLoss: tupl[6],
  )

func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
  StorageContent(cid: tupl[0], merkleRoot: tupl[1])

func solidityType*(_: type Cid): string =
  solidityType(seq[byte])

func solidityType*(_: type StorageContent): string =
  solidityType(StorageContent.fieldTypes)

@@ -128,6 +134,10 @@ func solidityType*(_: type StorageAsk): string =
func solidityType*(_: type StorageRequest): string =
  solidityType(StorageRequest.fieldTypes)

# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
  encoder.write(cid.data.buffer)

func encode*(encoder: var AbiEncoder, content: StorageContent) =
  encoder.write(content.fieldValues)

@@ -140,8 +150,12 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
  encoder.write(request.fieldValues)

func encode*(encoder: var AbiEncoder, slot: Slot) =
  encoder.write(slot.fieldValues)

func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
  let data = ?decoder.read(seq[byte])
  Cid.init(data).mapFailure

func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
  let tupl = ?decoder.read(StorageContent.fieldTypes)

@@ -160,27 +174,33 @@ func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
  success Slot.fromTuple(tupl)

func id*(request: StorageRequest): RequestId =
  let encoding = AbiEncoder.encode((request,))
  RequestId(keccak256.digest(encoding).data)

func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
  let encoding = AbiEncoder.encode((requestId, slotIndex))
  SlotId(keccak256.digest(encoding).data)

func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
  slotId(request.id, slotIndex)

func id*(slot: Slot): SlotId =
  slotId(slot.request, slot.slotIndex)

func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
  ask.pricePerBytePerSecond * ask.slotSize.u256

func pricePerSlot*(ask: StorageAsk): UInt256 =
  ask.duration.u256 * ask.pricePerSlotPerSecond

func totalPrice*(ask: StorageAsk): UInt256 =
  ask.slots.u256 * ask.pricePerSlot

func totalPrice*(request: StorageRequest): UInt256 =
  request.ask.totalPrice

func collateralPerSlot*(ask: StorageAsk): UInt256 =
  ask.collateralPerByte * ask.slotSize.u256

func size*(ask: StorageAsk): uint64 =
  ask.slots * ask.slotSize
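
# Worked pricing example (illustrative numbers): with pricePerBytePerSecond = 2,
# slotSize = 1024, duration = 60 and slots = 3, each slot costs 2 * 1024 = 2048
# per second, 2048 * 60 = 122_880 over the request duration, and the request
# totals 3 * 122_880 = 368_640.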

@@ -1,4 +1,4 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,16 +7,19 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/algorithm
import std/net
import std/sequtils

import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable
import pkg/questionable/results
import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256

import ./rng
import ./errors
@@ -31,15 +34,16 @@ export discv5
logScope:
  topics = "codex discovery"

type Discovery* = ref object of RootObj
  protocol*: discv5.Protocol # dht protocol
  key: PrivateKey # private key
  peerId: PeerId # the peer id of the local node
  announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
  providerRecord*: ?SignedPeerRecord
    # record to advertise node connection information; this carries any
    # address that the node can be connected on
  dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
  isStarted: bool

proc toNodeId*(cid: Cid): NodeId =
  ## Cid to discovery id
  ##
@@ -54,82 +58,121 @@ proc toNodeId*(host: ca.Address): NodeId =
  readUintBE[256](keccak256.digest(host.toArray).data)

proc findPeer*(
    d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
  ## Find peer using the given Discovery object
  ##
  trace "protocol.resolve..."
  try:
    let node = await d.protocol.resolve(toNodeId(peerId))

    return
      if node.isSome():
        node.get().record.data.some
      else:
        PeerRecord.none
  except CancelledError as exc:
    warn "Error finding peer", peerId = peerId, exc = exc.msg
    raise exc
  except CatchableError as exc:
    warn "Error finding peer", peerId = peerId, exc = exc.msg

    return PeerRecord.none

method find*(
    d: Discovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
  ## Find block providers
  ##
  try:
    without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
      error:
      warn "Error finding providers for block", cid, error = error.msg

    return providers.filterIt(not (it.data.peerId == d.peerId))
  except CancelledError as exc:
    warn "Error finding providers for block", cid, exc = exc.msg
    raise exc
  except CatchableError as exc:
    warn "Error finding providers for block", cid, exc = exc.msg

method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
  ## Provide a block Cid
  ##
  try:
    let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)

    if nodes.len <= 0:
      warn "Couldn't provide to any nodes!"
  except CancelledError as exc:
    warn "Error providing block", cid, exc = exc.msg
    raise exc
  except CatchableError as exc:
    warn "Error providing block", cid, exc = exc.msg

method find*(
    d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
  ## Find host providers
  ##
  try:
    trace "Finding providers for host", host = $host
    without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
      error:
      trace "Error finding providers for host", host = $host, exc = error.msg
      return

    if providers.len <= 0:
      trace "No providers found", host = $host
      return

    providers.sort do(a, b: SignedPeerRecord) -> int:
      system.cmp[uint64](a.data.seqNo, b.data.seqNo)

    return providers
  except CancelledError as exc:
    warn "Error finding providers for host", host = $host, exc = exc.msg
    raise exc
  except CatchableError as exc:
    warn "Error finding providers for host", host = $host, exc = exc.msg

method provide*(
    d: Discovery, host: ca.Address
) {.async: (raises: [CancelledError]), base.} =
  ## Provide hosts
  ##
  try:
    trace "Providing host", host = $host
    let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
    if nodes.len > 0:
      trace "Provided to nodes", nodes = nodes.len
  except CancelledError as exc:
    warn "Error providing host", host = $host, exc = exc.msg
    raise exc
  except CatchableError as exc:
    warn "Error providing host", host = $host, exc = exc.msg

method removeProvider*(
    d: Discovery, peerId: PeerId
): Future[void] {.base, async: (raises: [CancelledError]).} =
  ## Remove provider from providers table
  ##
  trace "Removing provider", peerId
  try:
    await d.protocol.removeProvidersLocal(peerId)
  except CancelledError as exc:
    warn "Error removing provider", peerId = peerId, exc = exc.msg
    raise exc
  except CatchableError as exc:
    warn "Error removing provider", peerId = peerId, exc = exc.msg
  except Exception as exc: # Something in discv5 is raising Exception
    warn "Error removing provider", peerId = peerId, exc = exc.msg
    raiseAssert("Unexpected Exception in removeProvider")

proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
  ## Update providers record
  ##
@@ -137,54 +180,58 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
  d.announceAddrs = @addrs

  info "Updating announce record", addrs = d.announceAddrs
  d.providerRecord = SignedPeerRecord
    .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
    .expect("Should construct signed record").some

  if not d.protocol.isNil:
    d.protocol.updateRecord(d.providerRecord).expect("Should update SPR")

proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
  ## Update DHT record
  ##
  info "Updating Dht record", addrs = addrs
  d.dhtRecord = SignedPeerRecord
    .init(d.key, PeerRecord.init(d.peerId, @addrs))
    .expect("Should construct signed record").some

  if not d.protocol.isNil:
    d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")

proc start*(d: Discovery) {.async: (raises: []).} =
  try:
    d.protocol.open()
    await d.protocol.start()
    d.isStarted = true
  except CatchableError as exc:
    error "Error starting discovery", exc = exc.msg

proc stop*(d: Discovery) {.async: (raises: []).} =
  if not d.isStarted:
    warn "Discovery not started, skipping stop"
    return

  try:
    await noCancel d.protocol.closeWait()
  except CatchableError as exc:
    error "Error stopping discovery", exc = exc.msg

proc new*(
    T: type Discovery,
    key: PrivateKey,
    bindIp = IPv4_any(),
    bindPort = 0.Port,
    announceAddrs: openArray[MultiAddress],
    bootstrapNodes: openArray[SignedPeerRecord] = [],
    store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"),
): Discovery =
  ## Create a new Discovery node instance for the given key and datastore
  ##
  var self =
    Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId"))

  self.updateAnnounceRecord(announceAddrs)

@@ -192,22 +239,20 @@ proc new*(
  # FIXME disable IP limits temporarily so we can run our workshop. Re-enable
  # and figure out proper solution.
  let discoveryConfig = DiscoveryConfig(
    tableIpLimits: TableIpLimits(tableIpLimit: high(uint), bucketIpLimit: high(uint)),
    bitsPerHop: DefaultBitsPerHop,
  )
  # --------------------------------------------------------------------------

  self.protocol = newProtocol(
    key,
    bindIp = bindIp,
    bindPort = bindPort,
    record = self.providerRecord.get,
    bootstrapRecords = bootstrapNodes,
    rng = Rng.instance(),
    providers = ProvidersManager.new(store),
    config = discoveryConfig,
  )

  self

@@ -1,4 +1,4 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,225 +0,0 @@
## Nim-Codex
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/sequtils

import pkg/taskpools
import pkg/taskpools/flowvars
import pkg/chronos
import pkg/chronos/threadsync
import pkg/questionable/results

import ./backend
import ../errors
import ../logutils

logScope:
  topics = "codex asyncerasure"

const
  CompletitionTimeout = 1.seconds # Maximum await time for completion after receiving a signal
  CompletitionRetryDelay = 10.millis

type
  EncoderBackendPtr = ptr EncoderBackend
  DecoderBackendPtr = ptr DecoderBackend

  # Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy
  EncodeTaskArgs = object
    signal: ThreadSignalPtr
    backend: EncoderBackendPtr
    blockSize: int
    ecM: int

  DecodeTaskArgs = object
    signal: ThreadSignalPtr
    backend: DecoderBackendPtr
    blockSize: int
    ecK: int

  SharedArrayHolder*[T] = object
    data: ptr UncheckedArray[T]
    size: int

  EncodeTaskResult = Result[SharedArrayHolder[byte], cstring]
  DecodeTaskResult = Result[SharedArrayHolder[byte], cstring]

proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
  var
    data = data.unsafeAddr
    parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize))

  try:
    let res = args.backend[].encode(data[], parity)

    if res.isOk:
      let
        resDataSize = parity.len * args.blockSize
        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
        arrHolder = SharedArrayHolder[byte](
          data: resData,
          size: resDataSize
        )

      for i in 0..<parity.len:
        copyMem(addr resData[i * args.blockSize], addr parity[i][0], args.blockSize)

      return ok(arrHolder)
    else:
      return err(res.error)
  except CatchableError as exception:
    return err(exception.msg.cstring)
  finally:
    if err =? args.signal.fireSync().mapFailure.errorOption():
      error "Error firing signal", msg = err.msg

proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]): DecodeTaskResult =
  var
    data = data.unsafeAddr
    parity = parity.unsafeAddr
    recovered = newSeqWith[seq[byte]](args.ecK, newSeq[byte](args.blockSize))

  try:
    let res = args.backend[].decode(data[], parity[], recovered)

    if res.isOk:
      let
        resDataSize = recovered.len * args.blockSize
        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
        arrHolder = SharedArrayHolder[byte](
          data: resData,
          size: resDataSize
        )

      for i in 0..<recovered.len:
        copyMem(addr resData[i * args.blockSize], addr recovered[i][0], args.blockSize)

      return ok(arrHolder)
    else:
      return err(res.error)
  except CatchableError as exception:
    return err(exception.msg.cstring)
  finally:
    if err =? args.signal.fireSync().mapFailure.errorOption():
      error "Error firing signal", msg = err.msg

proc proxySpawnEncodeTask(
  tp: Taskpool,
  args: EncodeTaskArgs,
  data: ref seq[seq[byte]]
): Flowvar[EncodeTaskResult] =
  # FIXME Uncomment the code below after addressing an issue:
  # https://github.com/codex-storage/nim-codex/issues/854

  # tp.spawn encodeTask(args, data[])

  let fv = EncodeTaskResult.newFlowVar
  fv.readyWith(encodeTask(args, data[]))
  return fv

proc proxySpawnDecodeTask(
  tp: Taskpool,
  args: DecodeTaskArgs,
  data: ref seq[seq[byte]],
  parity: ref seq[seq[byte]]
): Flowvar[DecodeTaskResult] =
  # FIXME Uncomment the code below after addressing an issue:
  # https://github.com/codex-storage/nim-codex/issues/854

  # tp.spawn decodeTask(args, data[], parity[])

  let fv = DecodeTaskResult.newFlowVar
  fv.readyWith(decodeTask(args, data[], parity[]))
  return fv

proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
  await wait(signal)

  var
    res: T
    awaitTotal: Duration
  while awaitTotal < CompletitionTimeout:
    if handle.tryComplete(res):
      return success(res)
    else:
      awaitTotal += CompletitionRetryDelay
      await sleepAsync(CompletitionRetryDelay)

  return failure("Task signaled finish but didn't return any result within " & $CompletitionRetryDelay)

proc asyncEncode*(
  tp: Taskpool,
  backend: EncoderBackend,
  data: ref seq[seq[byte]],
  blockSize: int,
  ecM: int
): Future[?!ref seq[seq[byte]]] {.async.} =
  without signal =? ThreadSignalPtr.new().mapFailure, err:
    return failure(err)

  try:
    let
      blockSize = data[0].len
      args = EncodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM)
      handle = proxySpawnEncodeTask(tp, args, data)

    without res =? await awaitResult(signal, handle), err:
      return failure(err)

    if res.isOk:
      var parity = seq[seq[byte]].new()
      parity[].setLen(ecM)

      for i in 0..<parity[].len:
        parity[i] = newSeq[byte](blockSize)
        copyMem(addr parity[i][0], addr res.value.data[i * blockSize], blockSize)

      deallocShared(res.value.data)

      return success(parity)
    else:
      return failure($res.error)
  finally:
    if err =? signal.close().mapFailure.errorOption():
      error "Error closing signal", msg = $err.msg

proc asyncDecode*(
  tp: Taskpool,
  backend: DecoderBackend,
  data, parity: ref seq[seq[byte]],
  blockSize: int
): Future[?!ref seq[seq[byte]]] {.async.} =
  without signal =? ThreadSignalPtr.new().mapFailure, err:
    return failure(err)

  try:
    let
      ecK = data[].len
      args = DecodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK)
      handle = proxySpawnDecodeTask(tp, args, data, parity)

    without res =? await awaitResult(signal, handle), err:
      return failure(err)

    if res.isOk:
      var recovered = seq[seq[byte]].new()
      recovered[].setLen(ecK)

      for i in 0..<recovered[].len:
        recovered[i] = newSeq[byte](blockSize)
        copyMem(addr recovered[i][0], addr res.value.data[i * blockSize], blockSize)

      deallocShared(res.value.data)

      return success(recovered)
    else:
      return failure($res.error)
  finally:
    if err =? signal.close().mapFailure.errorOption():
      error "Error closing signal", msg = $err.msg
|
||||
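To see how this wrapper is driven, here is a minimal sketch; the taskpool, encoder backend, block count, and block size below are illustrative assumptions, not part of this changeset:

import pkg/chronos
import pkg/taskpools
import pkg/questionable/results

proc encodeExample(tp: Taskpool, encoder: EncoderBackend) {.async.} =
  # Four data blocks of 4096 bytes each; asyncEncode schedules the backend
  # call and copies the ecM parity blocks back out of shared memory.
  var data = seq[seq[byte]].new()
  data[].setLen(4)
  for i in 0 ..< data[].len:
    data[i] = newSeq[byte](4096)

  without parity =? await asyncEncode(tp, encoder, data, 4096, ecM = 2), err:
    raise newException(CatchableError, err.msg)
  assert parity[].len == 2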
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,41 +7,38 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises

push: {.upraises: [].}
{.push raises: [], gcsafe.}

import ../stores

type
  ErasureBackend* = ref object of RootObj
    blockSize*: int # block size in bytes
    buffers*: int # number of original pieces
    parity*: int # number of redundancy pieces
    buffers*: int # number of original pieces
    parity*: int # number of redundancy pieces

  EncoderBackend* = ref object of ErasureBackend
  DecoderBackend* = ref object of ErasureBackend

method release*(self: ErasureBackend) {.base.} =
method release*(self: ErasureBackend) {.base, gcsafe.} =
  ## release the backend
  ##
  raiseAssert("not implemented!")

method encode*(
  self: EncoderBackend,
  buffers,
  parity: var openArray[seq[byte]]
): Result[void, cstring] {.base.} =
  buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
  dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
  ## encode buffers using a backend
  ##
  raiseAssert("not implemented!")

method decode*(
  self: DecoderBackend,
  buffers,
  parity,
  recovered: var openArray[seq[byte]]
): Result[void, cstring] {.base.} =
  buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
  dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
  ## decode buffers using a backend
  ##
  raiseAssert("not implemented!")
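The raw-buffer signatures above expect a manually managed table of block pointers. The concrete helpers live in ../utils/arrayutils (imported by the erasure module later in this changeset); the following is only an assumed sketch of their shape, not the actual implementation:

proc createDoubleArray(outer, inner: int): ptr UncheckedArray[ptr UncheckedArray[byte]] =
  # One shared allocation for the pointer table, then one per block.
  result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](
    allocShared0(outer * sizeof(pointer))
  )
  for i in 0 ..< outer:
    result[i] = cast[ptr UncheckedArray[byte]](allocShared0(inner))

proc freeDoubleArray(arr: ptr UncheckedArray[ptr UncheckedArray[byte]], outer: int) =
  # Free the blocks first, then the pointer table itself.
  for i in 0 ..< outer:
    deallocShared(arr[i])
  deallocShared(arr)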
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,7 +10,7 @@
import std/options

import pkg/leopard
import pkg/stew/results
import pkg/results

import ../backend

@ -22,43 +22,39 @@ type
    decoder*: Option[LeoDecoder]

method encode*(
  self: LeoEncoderBackend,
  data,
  parity: var openArray[seq[byte]]): Result[void, cstring] =
  self: LeoEncoderBackend,
  data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
  dataLen, parityLen: int,
): Result[void, cstring] =
  ## Encode data using Leopard backend

  if parity.len == 0:
  if parityLen == 0:
    return ok()

  var encoder = if self.encoder.isNone:
    self.encoder = (? LeoEncoder.init(
      self.blockSize,
      self.buffers,
      self.parity)).some
  var encoder =
    if self.encoder.isNone:
      self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
      self.encoder.get()
    else:
      self.encoder.get()

  encoder.encode(data, parity)
  encoder.encode(data, parity, dataLen, parityLen)

method decode*(
  self: LeoDecoderBackend,
  data,
  parity,
  recovered: var openArray[seq[byte]]): Result[void, cstring] =
  self: LeoDecoderBackend,
  data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
  dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
  ## Decode data using given Leopard backend

  var decoder =
    if self.decoder.isNone:
      self.decoder = (? LeoDecoder.init(
        self.blockSize,
        self.buffers,
        self.parity)).some
      self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
      self.decoder.get()
    else:
      self.decoder.get()

  decoder.decode(data, parity, recovered)
  decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)

method release*(self: LeoEncoderBackend) =
  if self.encoder.isSome:
@ -69,25 +65,15 @@ method release*(self: LeoDecoderBackend) =
    self.decoder.get().free()

proc new*(
  T: type LeoEncoderBackend,
  blockSize,
  buffers,
  parity: int): LeoEncoderBackend =
  T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
  ## Create an instance of a Leopard Encoder backend
  ##
  LeoEncoderBackend(
    blockSize: blockSize,
    buffers: buffers,
    parity: parity)
  LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)

proc new*(
  T: type LeoDecoderBackend,
  blockSize,
  buffers,
  parity: int): LeoDecoderBackend =
  T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
  ## Create an instance of a Leopard Decoder backend
  ##
  LeoDecoderBackend(
    blockSize: blockSize,
    buffers: buffers,
    parity: parity)
  LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
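Putting the pieces together, a hedged usage sketch of the new Leopard call shape, reusing the helper shapes assumed above:

proc leopardExample() =
  let encoder = LeoEncoderBackend.new(blockSize = 4096, buffers = 4, parity = 2)
  defer:
    encoder.release()

  let
    data = createDoubleArray(4, 4096) # four data blocks, filled by the caller
    parity = createDoubleArray(2, 4096) # two parity blocks, written by encode
  defer:
    freeDoubleArray(data, 4)
    freeDoubleArray(parity, 2)

  if encoder.encode(data, parity, 4, 2).isErr:
    echo "leopard encoding failed"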
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,14 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises
{.push raises: [], gcsafe.}

push: {.upraises: [].}

import std/sequtils
import std/sugar
import std/[sugar, atomics, sequtils]

import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools
@ -23,16 +22,17 @@ import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../clock
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils

import pkg/stew/byteutils

import ./backend
import ./asyncbackend

export backend

@ -62,18 +62,17 @@ type
    ## columns (with up to M blocks missing per column),
    ## or any combination thereof.
    ##
  EncoderProvider* =
    proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}

  EncoderProvider* = proc(size, blocks, parity: int): EncoderBackend
    {.raises: [Defect], noSideEffect.}

  DecoderProvider* = proc(size, blocks, parity: int): DecoderBackend
    {.raises: [Defect], noSideEffect.}
  DecoderProvider* =
    proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}

  Erasure* = ref object
    taskPool: Taskpool
    encoderProvider*: EncoderProvider
    decoderProvider*: DecoderProvider
    store*: BlockStore
    taskpool: Taskpool

  EncodingParams = object
    ecK: Natural
@ -90,6 +89,24 @@ type
    # provided.
    minSize*: NBytes

  EncodeTask = object
    success: Atomic[bool]
    erasure: ptr Erasure
    blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
    parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
    blockSize, blocksLen, parityLen: int
    signal: ThreadSignalPtr

  DecodeTask = object
    success: Atomic[bool]
    erasure: ptr Erasure
    blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
    parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
    recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
    blockSize, blocksLen: int
    parityLen, recoveredLen: int
    signal: ThreadSignalPtr

func indexToPos(steps, idx, step: int): int {.inline.} =
  ## Convert an index to a position in the encoded
  ## dataset
@ -101,21 +118,25 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
  (idx - step) div steps

proc getPendingBlocks(
  self: Erasure,
  manifest: Manifest,
  indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
  self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] =
  ## Get pending blocks iterator
  ##
  var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]

  var
  proc attachIndex(
    fut: Future[?!bt.Block], i: int
  ): Future[(?!bt.Block, int)] {.async.} =
    ## avoids closure capture issues
    return (await fut, i)

  for blockIndex in indices:
    # request blocks from the store
    pendingBlocks = indicies.map( (i: int) =>
      self.store.getBlock(
        BlockAddress.init(manifest.treeCid, i)
      ).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
    )
    let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
    pendingBlocks.add(attachIndex(fut, blockIndex))

  proc isFinished(): bool = pendingBlocks.len == 0
  proc isFinished(): bool =
    pendingBlocks.len == 0

  proc genNext(): Future[(?!bt.Block, int)] {.async.} =
    let completedFut = await one(pendingBlocks)
@ -126,36 +147,38 @@ proc getPendingBlocks(
      let (_, index) = await completedFut
      raise newException(
        CatchableError,
        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
          $index,
      )

  AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)

proc prepareEncodingData(
  self: Erasure,
  manifest: Manifest,
  params: EncodingParams,
  step: Natural,
  data: ref seq[seq[byte]],
  cids: ref seq[Cid],
  emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
  self: Erasure,
  manifest: Manifest,
  params: EncodingParams,
  step: Natural,
  data: ref seq[seq[byte]],
  cids: ref seq[Cid],
  emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
  ## Prepare data for encoding
  ##

  let
    strategy = params.strategy.init(
      firstIndex = 0,
      lastIndex = params.rounded - 1,
      iterations = params.steps
      firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
    )
    indicies = toSeq(strategy.getIndicies(step))
    pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
    indices = toSeq(strategy.getIndices(step))
    pendingBlocksIter =
      self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))

  var resolved = 0
  for fut in pendingBlocksIter:
    let (blkOrErr, idx) = await fut
    without blk =? blkOrErr, err:
      warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
      continue
      warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
      return failure(err)

    let pos = indexToPos(params.steps, idx, step)
    shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
@ -163,24 +186,26 @@ proc prepareEncodingData(

    resolved.inc()

  for idx in indicies.filterIt(it >= manifest.blocksCount):
  for idx in indices.filterIt(it >= manifest.blocksCount):
    let pos = indexToPos(params.steps, idx, step)
    trace "Padding with empty block", idx
    shallowCopy(data[pos], emptyBlock)
    without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
    without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
      err:
      return failure(err)
    cids[idx] = emptyBlockCid

  success(resolved.Natural)

proc prepareDecodingData(
  self: Erasure,
  encoded: Manifest,
  step: Natural,
  data: ref seq[seq[byte]],
  parityData: ref seq[seq[byte]],
  cids: ref seq[Cid],
  emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
  self: Erasure,
  encoded: Manifest,
  step: Natural,
  data: ref seq[seq[byte]],
  parityData: ref seq[seq[byte]],
  cids: ref seq[Cid],
  emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
  ## Prepare data for decoding
  ## `encoded` - the encoded manifest
  ## `step` - the current step
@ -192,12 +217,10 @@ proc prepareDecodingData(

  let
    strategy = encoded.protectedStrategy.init(
      firstIndex = 0,
      lastIndex = encoded.blocksCount - 1,
      iterations = encoded.steps
      firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
    )
    indicies = toSeq(strategy.getIndicies(step))
    pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
    indices = toSeq(strategy.getIndices(step))
    pendingBlocksIter = self.getPendingBlocks(encoded, indices)

  var
    dataPieces = 0
@ -211,23 +234,24 @@ proc prepareDecodingData(

    let (blkOrErr, idx) = await fut
    without blk =? blkOrErr, err:
      trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
      trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
      continue

    let
      pos = indexToPos(encoded.steps, idx, step)
    let pos = indexToPos(encoded.steps, idx, step)

    logScope:
      cid = blk.cid
      idx = idx
      pos = pos
      step = step
      cid = blk.cid
      idx = idx
      pos = pos
      step = step
      empty = blk.isEmpty

    cids[idx] = blk.cid
    if idx >= encoded.rounded:
      trace "Retrieved parity block"
      shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
      shallowCopy(
        parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
      )
      parityPieces.inc
    else:
      trace "Retrieved data block"
@ -239,17 +263,19 @@ proc prepareDecodingData(
  return success (dataPieces.Natural, parityPieces.Natural)

proc init*(
  _: type EncodingParams,
  manifest: Manifest,
  ecK: Natural, ecM: Natural,
  strategy: StrategyType): ?!EncodingParams =
  _: type EncodingParams,
  manifest: Manifest,
  ecK: Natural,
  ecM: Natural,
  strategy: StrategyType,
): ?!EncodingParams =
  if ecK > manifest.blocksCount:
    let exc = (ref InsufficientBlocksError)(
      msg: "Unable to encode manifest, not enough blocks, ecK = " &
        $ecK &
        ", blocksCount = " &
        $manifest.blocksCount,
      minSize: ecK.NBytes * manifest.blockSize)
      msg:
        "Unable to encode manifest, not enough blocks, ecK = " & $ecK &
        ", blocksCount = " & $manifest.blocksCount,
      minSize: ecK.NBytes * manifest.blockSize,
    )
    return failure(exc)

  let
@ -263,62 +289,139 @@ proc init*(
    rounded: rounded,
    steps: steps,
    blocksCount: blocksCount,
    strategy: strategy
    strategy: strategy,
  )

proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
  # Task suitable for running in taskpools - look, no GC!
  let encoder =
    task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
  defer:
    encoder.release()
    discard task[].signal.fireSync()

  if (
    let res =
      encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
    res.isErr
  ):
    warn "Error from leopard encoder backend!", error = $res.error

    task[].success.store(false)
  else:
    task[].success.store(true)

proc asyncEncode*(
  self: Erasure,
  blockSize, blocksLen, parityLen: int,
  blocks: ref seq[seq[byte]],
  parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
  without threadPtr =? ThreadSignalPtr.new():
    return failure("Unable to create thread signal")

  defer:
    threadPtr.close().expect("closing once works")

  var data = makeUncheckedArray(blocks)

  defer:
    dealloc(data)

  ## Create an encode task with block data
  var task = EncodeTask(
    erasure: addr self,
    blockSize: blockSize,
    blocksLen: blocksLen,
    parityLen: parityLen,
    blocks: data,
    parity: parity,
    signal: threadPtr,
  )

  doAssert self.taskPool.numThreads > 1,
    "Must have at least one separate thread or signal will never be fired"
  self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
  let threadFut = threadPtr.wait()

  if joinErr =? catch(await threadFut.join()).errorOption:
  if err =? catch(await noCancel threadFut).errorOption:
    return failure(err)
    if joinErr of CancelledError:
      raise (ref CancelledError) joinErr
    else:
      return failure(joinErr)

  if not task.success.load():
    return failure("Leopard encoding task failed")

  success()

proc encodeData(
  self: Erasure,
  manifest: Manifest,
  params: EncodingParams
): Future[?!Manifest] {.async.} =
  self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
  ## Encode blocks pointed to by the protected manifest
  ##
  ## `manifest` - the manifest to encode
  ##

  logScope:
    steps = params.steps
    rounded_blocks = params.rounded
    blocks_count = params.blocksCount
    ecK = params.ecK
    ecM = params.ecM
    steps = params.steps
    rounded_blocks = params.rounded
    blocks_count = params.blocksCount
    ecK = params.ecK
    ecM = params.ecM

  var
    cids = seq[Cid].new()
    encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
    emptyBlock = newSeq[byte](manifest.blockSize.int)

  cids[].setLen(params.blocksCount)

  try:
    for step in 0..<params.steps:
    for step in 0 ..< params.steps:
      # TODO: Don't allocate a new seq every time, allocate once and zero out
      var
        data = seq[seq[byte]].new() # number of blocks to encode
        parity = createDoubleArray(params.ecM, manifest.blockSize.int)
      defer:
        freeDoubleArray(parity, params.ecM)

      data[].setLen(params.ecK)
      # TODO: this is a tight blocking loop so we sleep here to allow
      # other events to be processed, this should be addressed
      # by threading
      await sleepAsync(10.millis)

      without resolved =?
        (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      trace "Erasure coding data", data = data[].len, parity = params.ecM

      without parity =? await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err:
        trace "Error encoding data", err = err.msg
        (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
        err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      trace "Erasure coding data", data = data[].len

      try:
        if err =? (
          await self.asyncEncode(
            manifest.blockSize.int, params.ecK, params.ecM, data, parity
          )
        ).errorOption:
          return failure(err)
      except CancelledError as exc:
        raise exc

      var idx = params.rounded + step
      for j in 0..<params.ecM:
        without blk =? bt.Block.new(parity[j]), error:
      for j in 0 ..< params.ecM:
        var innerPtr: ptr UncheckedArray[byte] = parity[][j]
        without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
          error:
          trace "Unable to create parity block", err = error.msg
          return failure(error)

        trace "Adding parity block", cid = blk.cid, idx
        cids[idx] = blk.cid
        if isErr (await self.store.putBlock(blk)):
          trace "Unable to store block!", cid = blk.cid
        if error =? (await self.store.putBlock(blk)).errorOption:
          warn "Unable to store block!", cid = blk.cid, msg = error.msg
          return failure("Unable to store block!")
        idx.inc(params.steps)
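The rounding arithmetic that the hunk above elides decides where these parity blocks land; a worked sketch with assumed numbers (the exact computation lives in the part of EncodingParams.init not shown here):

let (blocksCount, ecK, ecM) = (10, 4, 2)
let rounded = ((blocksCount + ecK - 1) div ecK) * ecK # data blocks, padded up
let steps = rounded div ecK # encoding iterations
assert rounded == 12 and steps == 3
# Parity for a given step starts at index rounded + step and advances by
# steps, matching `var idx = params.rounded + step` and `idx.inc(params.steps)`.
assert rounded + steps * ecM == 18 # total block slots in the protected set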
@ -337,7 +440,7 @@ proc encodeData(
      datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
      ecK = params.ecK,
      ecM = params.ecM,
      strategy = params.strategy
      strategy = params.strategy,
    )

    trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
@ -348,15 +451,14 @@ proc encodeData(
  except CatchableError as exc:
    trace "Erasure coding encoding error", exc = exc.msg
    return failure(exc)
  finally:
    encoder.release()

proc encode*(
  self: Erasure,
  manifest: Manifest,
  blocks: Natural,
  parity: Natural,
  strategy = SteppedStrategy): Future[?!Manifest] {.async.} =
  self: Erasure,
  manifest: Manifest,
  blocks: Natural,
  parity: Natural,
  strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
  ## Encode a manifest into one that is erasure protected.
  ##
  ## `manifest` - the original manifest to be encoded
@ -372,20 +474,88 @@ proc encode*(

  return success encodedManifest

proc decode*(
  self: Erasure,
  encoded: Manifest): Future[?!Manifest] {.async.} =
  ## Decode a protected manifest into its original
  ## manifest
  ##
  ## `encoded` - the encoded (protected) manifest to
  ## be recovered
  ##
proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
  # Task suitable for running in taskpools - look, no GC!
  let decoder =
    task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
  defer:
    decoder.release()
    discard task[].signal.fireSync()

  if (
    let res = decoder.decode(
      task[].blocks,
      task[].parity,
      task[].recovered,
      task[].blocksLen,
      task[].parityLen,
      task[].recoveredLen,
    )
    res.isErr
  ):
    warn "Error from leopard decoder backend!", error = $res.error
    task[].success.store(false)
  else:
    task[].success.store(true)

proc asyncDecode*(
  self: Erasure,
  blockSize, blocksLen, parityLen: int,
  blocks, parity: ref seq[seq[byte]],
  recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
  without threadPtr =? ThreadSignalPtr.new():
    return failure("Unable to create thread signal")

  defer:
    threadPtr.close().expect("closing once works")

  var
    blockData = makeUncheckedArray(blocks)
    parityData = makeUncheckedArray(parity)

  defer:
    dealloc(blockData)
    dealloc(parityData)

  ## Create a decode task with block data
  var task = DecodeTask(
    erasure: addr self,
    blockSize: blockSize,
    blocksLen: blocksLen,
    parityLen: parityLen,
    recoveredLen: blocksLen,
    blocks: blockData,
    parity: parityData,
    recovered: recovered,
    signal: threadPtr,
  )

  doAssert self.taskPool.numThreads > 1,
    "Must have at least one separate thread or signal will never be fired"
  self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
  let threadFut = threadPtr.wait()

  if joinErr =? catch(await threadFut.join()).errorOption:
  if err =? catch(await noCancel threadFut).errorOption:
    return failure(err)
    if joinErr of CancelledError:
      raise (ref CancelledError) joinErr
    else:
      return failure(joinErr)

  if not task.success.load():
    return failure("Leopard decoding task failed")

  success()

proc decodeInternal(
  self: Erasure, encoded: Manifest
): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
  logScope:
    steps = encoded.steps
    rounded_blocks = encoded.rounded
    new_manifest = encoded.blocksCount
    steps = encoded.steps
    rounded_blocks = encoded.rounded
    new_manifest = encoded.blocksCount

  var
    cids = seq[Cid].new()
@ -395,16 +565,27 @@ proc decode*(

  cids[].setLen(encoded.blocksCount)
  try:
    for step in 0..<encoded.steps:
    for step in 0 ..< encoded.steps:
      # TODO: this is a tight blocking loop so we sleep here to allow
      # other events to be processed, this should be addressed
      # by threading
      await sleepAsync(10.millis)

      var
        data = seq[seq[byte]].new()
        parity = seq[seq[byte]].new()
        parityData = seq[seq[byte]].new()
        recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
      defer:
        freeDoubleArray(recovered, encoded.ecK)

      data[].setLen(encoded.ecK) # set len to K
      parity[].setLen(encoded.ecM) # set len to M
      data[].setLen(encoded.ecK) # set len to K
      parityData[].setLen(encoded.ecM) # set len to M

      without (dataPieces, _) =?
        (await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err:
      without (dataPieces, _) =? (
        await self.prepareDecodingData(
          encoded, step, data, parityData, cids, emptyBlock
        )
      ), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)
@ -413,23 +594,34 @@ proc decode*(
        continue

      trace "Erasure decoding data"
      try:
        if err =? (
          await self.asyncDecode(
            encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
          )
        ).errorOption:
          return failure(err)
      except CancelledError as exc:
        raise exc

      without recovered =? await asyncDecode(self.taskpool, decoder, data, parity, encoded.blockSize.int), err:
        trace "Error decoding data", err = err.msg
        return failure(err)

      for i in 0..<encoded.ecK:
      for i in 0 ..< encoded.ecK:
        let idx = i * encoded.steps + step
        if data[i].len <= 0 and not cids[idx].isEmpty:
          without blk =? bt.Block.new(recovered[i]), error:
          var innerPtr: ptr UncheckedArray[byte] = recovered[][i]

          without blk =? bt.Block.new(
            innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
          ), error:
            trace "Unable to create block!", exc = error.msg
            return failure(error)

          trace "Recovered block", cid = blk.cid, index = i
          if isErr (await self.store.putBlock(blk)):
            trace "Unable to store block!", cid = blk.cid
          if error =? (await self.store.putBlock(blk)).errorOption:
            warn "Unable to store block!", cid = blk.cid, msg = error.msg
            return failure("Unable to store block!")

          self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)

          cids[idx] = blk.cid
          recoveredIndices.add(idx)
  except CancelledError as exc:
@ -441,25 +633,78 @@ proc decode*(
  finally:
    decoder.release()

  without tree =? CodexTree.init(cids[0..<encoded.originalBlocksCount]), err:
  return (cids, recoveredIndices).success

proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
  ## Decode a protected manifest into its original
  ## manifest
  ##
  ## `encoded` - the encoded (protected) manifest to
  ## be recovered
  ##

  without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
    return failure(err)

  without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
    return failure(err)

  without treeCid =? tree.rootCid, err:
    return failure(err)

  if treeCid != encoded.originalTreeCid:
    return failure("Original tree root differs from the tree root computed out of recovered data")
    return failure(
      "Original tree root differs from the tree root computed out of recovered data"
    )

  let idxIter = Iter[Natural].new(recoveredIndices)
    .filter((i: Natural) => i < tree.leavesCount)
  let idxIter =
    Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)

  if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
    return failure(err)
    return failure(err)

  let decoded = Manifest.new(encoded)

  return decoded.success

proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
  ## Repair a protected manifest by reconstructing the full dataset
  ##
  ## `encoded` - the encoded (protected) manifest to
  ## be repaired
  ##

  without (cids, _) =? (await self.decodeInternal(encoded)), err:
    return failure(err)

  without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
    return failure(err)

  without treeCid =? tree.rootCid, err:
    return failure(err)

  if treeCid != encoded.originalTreeCid:
    return failure(
      "Original tree root differs from the tree root computed out of recovered data"
    )

  if err =? (await self.store.putAllProofs(tree)).errorOption:
    return failure(err)

  without repaired =? (
    await self.encode(
      Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
    )
  ), err:
    return failure(err)

  if repaired.treeCid != encoded.treeCid:
    return failure(
      "Original tree root differs from the repaired tree root encoded out of recovered data"
    )

  return success()

proc start*(self: Erasure) {.async.} =
  return
@ -467,16 +712,17 @@ proc stop*(self: Erasure) {.async.} =
  return

proc new*(
  T: type Erasure,
  store: BlockStore,
  encoderProvider: EncoderProvider,
  decoderProvider: DecoderProvider,
  taskpool: Taskpool): Erasure =
  T: type Erasure,
  store: BlockStore,
  encoderProvider: EncoderProvider,
  decoderProvider: DecoderProvider,
  taskPool: Taskpool,
): Erasure =
  ## Create a new Erasure instance for encoding and decoding manifests
  ##

  Erasure(
    store: store,
    encoderProvider: encoderProvider,
    decoderProvider: decoderProvider,
    taskpool: taskpool)
    taskPool: taskPool,
  )
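For orientation, a construction sketch; `store` is assumed to be an existing BlockStore, the provider lambdas simply wrap the Leopard backends shown earlier, and the taskpool size honours the doAssert above (how the providers are wired elsewhere in the codebase is an assumption):

let erasure = Erasure.new(
  store,
  proc(size, blocks, parity: int): EncoderBackend =
    LeoEncoderBackend.new(size, blocks, parity),
  proc(size, blocks, parity: int): DecoderBackend =
    LeoDecoderBackend.new(size, blocks, parity),
  Taskpool.new(numThreads = 2), # must be > 1 or the signal never fires
)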
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/options
{.push raises: [].}

import pkg/stew/results
import std/options
import std/sugar
import std/sequtils

import pkg/results
import pkg/chronos
import pkg/questionable/results

@ -19,14 +23,18 @@ type
  CodexError* = object of CatchableError # base codex error
  CodexResult*[T] = Result[T, ref CodexError]

  FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]

template mapFailure*[T, V, E](
  exp: Result[T, V],
  exc: typedesc[E],
  exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =
  ## Convert `Result[T, V]` to `Result[T, ref CatchableError]`
  ##

  exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
  exp.mapErr(
    proc(e: V): ref CatchableError =
      (ref exc)(msg: $e)
  )

template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
  mapFailure(exp, CodexError)
@ -38,12 +46,43 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
  else:
    T.failure("Option is None")

proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
  try:
    await allFuturesThrowing(fut)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    return failure(exc.msg)
proc allFinishedFailed*[T](
  futs: auto
): Future[FinishedFailed[T]] {.async: (raises: [CancelledError]).} =
  ## Check if all futures have finished or failed
  ##
  ## TODO: wip, not sure if we want this - at the minimum,
  ## we should probably avoid the async transform

  return success()
  var res: FinishedFailed[T] = (@[], @[])
  await allFutures(futs)
  for f in futs:
    if f.failed:
      res.failure.add f
    else:
      res.success.add f

  return res

proc allFinishedValues*[T](
  futs: auto
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  ## If all futures have finished, return corresponding values,
  ## otherwise return failure
  ##

  # wait for all futures to be either completed, failed or canceled
  await allFutures(futs)

  let numOfFailed = futs.countIt(it.failed)

  if numOfFailed > 0:
    return failure "Some futures failed (" & $numOfFailed & ")"

  # here, we know there are no failed futures in "futs"
  # and we are only interested in those that completed successfully
  let values = collect:
    for b in futs:
      if b.finished:
        b.value
  return success values
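A minimal sketch of how these helpers are consumed; the `work` proc is a stand-in assumption:

proc work(x: int): Future[int] {.async.} =
  return x * 2

proc demo() {.async.} =
  let futs = @[work(1), work(2), work(3)]

  # Partition finished futures into successes and failures.
  let (succeeded, failed) = await allFinishedFailed[int](futs)
  assert failed.len == 0 and succeeded.len == 3

  # Or collect all values, failing if any future failed.
  without values =? await allFinishedValues[int](futs), err:
    raise newException(CatchableError, err.msg)
  assert values == @[2, 4, 6]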
@ -10,7 +10,7 @@
  # 0 => 0, 1, 2
  # 1 => 3, 4, 5
  # 2 => 6, 7, 8
  LinearStrategy,
  LinearStrategy

  # Stepped indexing:
  # 0 => 0, 3, 6
@ -21,77 +21,106 @@ type
  # Representing a strategy for grouping indices (of blocks usually)
  # Given an iteration-count as input, will produce a seq of
  # selected indices.

  IndexingError* = object of CodexError
  IndexingWrongIndexError* = object of IndexingError
  IndexingWrongIterationsError* = object of IndexingError
  IndexingWrongGroupCountError* = object of IndexingError
  IndexingWrongPadBlockCountError* = object of IndexingError

  IndexingStrategy* = object
    strategyType*: StrategyType
    firstIndex*: int # Lowest index that can be returned
    lastIndex*: int # Highest index that can be returned
    iterations*: int # getIndices(iteration) will run from 0 ..< iterations
    step*: int
    strategyType*: StrategyType # Indexing strategy algorithm
    firstIndex*: int # Lowest index that can be returned
    lastIndex*: int # Highest index that can be returned
    iterations*: int # Number of iteration steps (0 ..< iterations)
    step*: int # Step size between generated indices
    groupCount*: int # Number of groups to partition indices into
    padBlockCount*: int # Number of padding blocks to append per group

func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
func checkIteration(
  self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
  if iteration >= self.iterations:
    raise newException(
      IndexingError,
      "Indexing iteration can't be greater than or equal to iterations.")
      IndexingError, "Indexing iteration can't be greater than or equal to iterations."
    )

func getIter(first, last, step: int): Iter[int] =
  {.cast(noSideEffect).}:
    Iter[int].new(first, last, step)

func getLinearIndicies(
  self: IndexingStrategy,
  iteration: int): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)

func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  let
    first = self.firstIndex + iteration * self.step
    last = min(first + self.step - 1, self.lastIndex)

  getIter(first, last, 1)

func getSteppedIndicies(
  self: IndexingStrategy,
  iteration: int): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)

func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  let
    first = self.firstIndex + iteration
    last = self.lastIndex

  getIter(first, last, self.iterations)

func getIndicies*(
  self: IndexingStrategy,
  iteration: int): Iter[int] {.raises: [IndexingError].} =

func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  case self.strategyType
  of StrategyType.LinearStrategy:
    self.getLinearIndicies(iteration)
    self.getLinearIndices(iteration)
  of StrategyType.SteppedStrategy:
    self.getSteppedIndicies(iteration)
    self.getSteppedIndices(iteration)

func getIndices*(
  self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)
  {.cast(noSideEffect).}:
    Iter[int].new(
      iterator (): int {.gcsafe.} =
        for value in self.getStrategyIndices(iteration):
          yield value

        for i in 0 ..< self.padBlockCount:
          yield self.lastIndex + (iteration + 1) + i * self.groupCount

    )

func init*(
  strategy: StrategyType,
  firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =

  strategy: StrategyType,
  firstIndex, lastIndex, iterations: int,
  groupCount = 0,
  padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} =
  if firstIndex > lastIndex:
    raise newException(
      IndexingWrongIndexError,
      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")
      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
        ")",
    )

  if iterations <= 0:
    raise newException(
      IndexingWrongIterationsError,
      "iterations (" & $iterations & ") must be greater than zero.")
      "iterations (" & $iterations & ") must be greater than zero.",
    )

  if padBlockCount < 0:
    raise newException(
      IndexingWrongPadBlockCountError,
      "padBlockCount (" & $padBlockCount & ") must be equal or greater than zero.",
    )

  if padBlockCount > 0 and groupCount <= 0:
    raise newException(
      IndexingWrongGroupCountError,
      "groupCount (" & $groupCount & ") must be greater than zero.",
    )

  IndexingStrategy(
    strategyType: strategy,
    firstIndex: firstIndex,
    lastIndex: lastIndex,
    iterations: iterations,
    step: divUp((lastIndex - firstIndex + 1), iterations))
    step: divUp((lastIndex - firstIndex + 1), iterations),
    groupCount: groupCount,
    padBlockCount: padBlockCount,
  )
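A worked sketch of the padded iteration, using the Iter API shown above; the numbers follow the stepped example in the comments at the top of this file:

let stepped = SteppedStrategy.init(firstIndex = 0, lastIndex = 8, iterations = 3)
assert toSeq(stepped.getIndices(0)) == @[0, 3, 6]

# With padding, each iteration appends padBlockCount extra indices past
# lastIndex, spaced groupCount apart: lastIndex + (iteration + 1) + i * groupCount.
let padded = SteppedStrategy.init(
  firstIndex = 0, lastIndex = 8, iterations = 3, groupCount = 3, padBlockCount = 2
)
assert toSeq(padded.getIndices(0)) == @[0, 3, 6, 9, 12]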
@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
##
## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output
@ -98,7 +98,6 @@ import pkg/questionable/results
import ./utils/json except formatIt # TODO: remove exception?
import pkg/stew/byteutils
import pkg/stint
import pkg/upraises

export byteutils
export chronicles except toJson, formatIt, `%`
@ -107,7 +106,6 @@ export sequtils
export json except formatIt
export strutils
export sugar
export upraises
export results

func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
@ -125,8 +123,9 @@ func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
  short

func shortHexLog*(long: string): string =
  if long[0..1] == "0x": result &= "0x"
  result &= long[2..long.high].shortLog("..", 4, 4)
  if long[0 .. 1] == "0x":
    result &= "0x"
  result &= long[2 .. long.high].shortLog("..", 4, 4)

func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =
  v.to0xHex.shortHexLog
@ -153,7 +152,7 @@ proc formatTextLineSeq*(val: seq[string]): string =
template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
  # Provides formatters for logging with Chronicles for the given type and
  # `LogFormat`.
  # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden
  # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overridden
  # since the base `setProperty` is generic using `auto` and conflicts with
  # providing a generic `seq` and `Option` override.
  when format == LogFormat.json:
@ -184,12 +183,16 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
      let v = opts.map(opt => opt.formatJsonOption)
      setProperty(r, key, json.`%`(v))

    proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) =
    proc setProperty*(
        r: var JsonRecord, key: string, val: seq[T]
    ) {.raises: [ValueError, IOError].} =
      var it {.inject, used.}: T
      let v = val.map(it => body)
      setProperty(r, key, json.`%`(v))

    proc setProperty*(r: var JsonRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
    proc setProperty*(
        r: var JsonRecord, key: string, val: T
    ) {.raises: [ValueError, IOError].} =
      var it {.inject, used.}: T = val
      let v = body
      setProperty(r, key, json.`%`(v))
@ -220,23 +223,35 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
      let v = opts.map(opt => opt.formatTextLineOption)
      setProperty(r, key, v.formatTextLineSeq)

    proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) =
    proc setProperty*(
        r: var TextLineRecord, key: string, val: seq[T]
    ) {.raises: [ValueError, IOError].} =
      var it {.inject, used.}: T
      let v = val.map(it => body)
      setProperty(r, key, v.formatTextLineSeq)

    proc setProperty*(r: var TextLineRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
    proc setProperty*(
        r: var TextLineRecord, key: string, val: T
    ) {.raises: [ValueError, IOError].} =
      var it {.inject, used.}: T = val
      let v = body
      setProperty(r, key, v)

template formatIt*(T: type, body: untyped) {.dirty.} =
  formatIt(LogFormat.textLines, T): body
  formatIt(LogFormat.json, T): body
  formatIt(LogFormat.textLines, T):
    body
  formatIt(LogFormat.json, T):
    body

formatIt(LogFormat.textLines, Cid): shortLog($it)
formatIt(LogFormat.json, Cid): $it
formatIt(UInt256): $it
formatIt(MultiAddress): $it
formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]): it.to0xHex
formatIt(LogFormat.textLines, Cid):
  shortLog($it)
formatIt(LogFormat.json, Cid):
  $it
formatIt(UInt256):
  $it
formatIt(MultiAddress):
  $it
formatIt(LogFormat.textLines, array[32, byte]):
  it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]):
  it.to0xHex
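As a usage sketch, declaring formatters for a hypothetical custom type follows the same pattern as the built-ins above:

type ExampleId = object
  id: string

formatIt(LogFormat.textLines, ExampleId):
  shortLog(it.id) # abbreviated for one-line text logs
formatIt(LogFormat.json, ExampleId):
  it.id # full value in structured json logs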
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,9 +9,9 @@

# This module implements serialization and deserialization of Manifest

import pkg/upraises
import times

push: {.upraises: [].}
{.push raises: [].}

import std/tables
import std/sequtils
@ -32,7 +32,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  ## multicodec container (Dag-pb) for now
  ##

  ? manifest.verify()
  ?manifest.verify()
  var pbNode = initProtoBuffer()

  # NOTE: The `Data` field in the `dag-pb`
@ -59,6 +59,8 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  # optional hcodec: MultiCodec = 5 # Multihash codec
  # optional version: CidVersion = 6; # Cid version
  # optional ErasureInfo erasure = 7; # erasure coding info
  # optional filename: ?string = 8; # original filename
  # optional mimetype: ?string = 9; # original mimetype
  # }
  # ```
  #
@ -70,6 +72,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  header.write(4, manifest.codec.uint32)
  header.write(5, manifest.hcodec.uint32)
  header.write(6, manifest.version.uint32)

  if manifest.protected:
    var erasureInfo = initProtoBuffer()
    erasureInfo.write(1, manifest.ecK.uint32)
@ -90,6 +93,12 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
    erasureInfo.finish()
    header.write(7, erasureInfo)

  if manifest.filename.isSome:
    header.write(8, manifest.filename.get())

  if manifest.mimetype.isSome:
    header.write(9, manifest.mimetype.get())

  pbNode.write(1, header) # set the treeCid as the data field
  pbNode.finish()

@ -118,6 +127,8 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
    slotRoots: seq[seq[byte]]
    cellSize: uint32
    verifiableStrategy: uint32
    filename: string
    mimetype: string

  # Decode `Header` message
  if pbNode.getField(1, pbHeader).isErr:
@ -145,6 +156,12 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
  if pbHeader.getField(7, pbErasureInfo).isErr:
    return failure("Unable to decode `erasureInfo` from manifest!")

  if pbHeader.getField(8, filename).isErr:
    return failure("Unable to decode `filename` from manifest!")

  if pbHeader.getField(9, mimetype).isErr:
    return failure("Unable to decode `mimetype` from manifest!")

  let protected = pbErasureInfo.buffer.len > 0
  var verifiable = false
  if protected:
@ -180,11 +197,13 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
    if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
      return failure("Unable to decode `verifiableStrategy` from manifest!")

  let
    treeCid = ? Cid.init(treeCidBuf).mapFailure
  let treeCid = ?Cid.init(treeCidBuf).mapFailure

  let
    self = if protected:
  var filenameOption = if filename.len == 0: string.none else: filename.some
  var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some

  let self =
    if protected:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
@ -194,31 +213,37 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
        codec = codec.MultiCodec,
        ecK = ecK.int,
        ecM = ecM.int,
        originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
        originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
        originalDatasetSize = originalDatasetSize.NBytes,
        strategy = StrategyType(protectedStrategy))
    else:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
        blockSize = blockSize.NBytes,
        version = CidVersion(version),
        hcodec = hcodec.MultiCodec,
        codec = codec.MultiCodec)
        strategy = StrategyType(protectedStrategy),
        filename = filenameOption,
        mimetype = mimetypeOption,
      )
    else:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
        blockSize = blockSize.NBytes,
        version = CidVersion(version),
        hcodec = hcodec.MultiCodec,
        codec = codec.MultiCodec,
        filename = filenameOption,
        mimetype = mimetypeOption,
      )

  ? self.verify()
  ?self.verify()

  if verifiable:
    let
      verifyRootCid = ? Cid.init(verifyRoot).mapFailure
      slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure)
      verifyRootCid = ?Cid.init(verifyRoot).mapFailure
      slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)

    return Manifest.new(
      manifest = self,
      verifyRoot = verifyRootCid,
      slotRoots = slotRootCids,
      cellSize = cellSize.NBytes,
      strategy = StrategyType(verifiableStrategy)
      strategy = StrategyType(verifiableStrategy),
    )

  self.success
@ -227,7 +252,7 @@ func decode*(_: type Manifest, blk: Block): ?!Manifest =
  ## Decode a manifest using `decoder`
  ##

  if not ? blk.cid.isManifest:
  if not ?blk.cid.isManifest:
    return failure "Cid not a manifest codec"

  Manifest.decode(blk.data)
@ -1,4 +1,4 @@
|
||||
## Nim-Codex
|
||||
## Logos Storage
|
||||
## Copyright (c) 2022 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
@ -9,9 +9,7 @@
|
||||
|
||||
# This module defines all operations on Manifest
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push: {.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/libp2p/protobuf/minprotobuf
|
||||
import pkg/libp2p/[cid, multihash, multicodec]
|
||||
@ -25,34 +23,36 @@ import ../blocktype
|
||||
import ../indexingstrategy
|
||||
import ../logutils
|
||||
|
||||
|
||||
# TODO: Manifest should be reworked to more concrete types,
|
||||
# perhaps using inheritance
|
||||
type
|
||||
Manifest* = ref object of RootObj
|
||||
treeCid {.serialize.}: Cid # Root of the merkle tree
|
||||
datasetSize {.serialize.}: NBytes # Total size of all blocks
|
||||
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
|
||||
codec: MultiCodec # Dataset codec
|
||||
hcodec: MultiCodec # Multihash codec
|
||||
version: CidVersion # Cid version
|
||||
case protected {.serialize.}: bool # Protected datasets have erasure coded info
|
||||
type Manifest* = ref object of RootObj
|
||||
treeCid {.serialize.}: Cid # Root of the merkle tree
|
||||
datasetSize {.serialize.}: NBytes # Total size of all blocks
|
||||
blockSize {.serialize.}: NBytes
|
||||
# Size of each contained block (might not be needed if blocks are len-prefixed)
|
||||
codec: MultiCodec # Dataset codec
|
||||
hcodec: MultiCodec # Multihash codec
|
||||
version: CidVersion # Cid version
|
||||
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
|
||||
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
|
||||
case protected {.serialize.}: bool # Protected datasets have erasure coded info
|
||||
of true:
|
||||
ecK: int # Number of blocks to encode
|
||||
ecM: int # Number of resulting parity blocks
|
||||
originalTreeCid: Cid # The original root of the dataset being erasure coded
|
||||
originalDatasetSize: NBytes
|
||||
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
|
||||
case verifiable {.serialize.}: bool
|
||||
# Verifiable datasets can be used to generate storage proofs
|
||||
of true:
|
||||
ecK: int # Number of blocks to encode
|
||||
ecM: int # Number of resulting parity blocks
|
||||
originalTreeCid: Cid # The original root of the dataset being erasure coded
|
||||
originalDatasetSize: NBytes
|
||||
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
|
||||
case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
|
||||
of true:
|
||||
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
|
||||
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
|
||||
cellSize: NBytes # Size of each slot cell
|
||||
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
|
||||
else:
|
||||
discard
|
||||
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
|
||||
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
|
||||
cellSize: NBytes # Size of each slot cell
|
||||
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
|
||||
else:
|
||||
discard
|
||||
else:
|
||||
discard
|
||||
|
||||
############################################################
# Accessors
@ -121,12 +121,18 @@ func verifiableStrategy*(self: Manifest): StrategyType =
func numSlotBlocks*(self: Manifest): int =
  divUp(self.blocksCount, self.numSlots)

func filename*(self: Manifest): ?string =
  self.filename

func mimetype*(self: Manifest): ?string =
  self.mimetype

############################################################
# Operations on block list
############################################################

func isManifest*(cid: Cid): ?!bool =
  success (ManifestCodec == ? cid.contentType().mapFailure(CodexError))
  success (ManifestCodec == ?cid.contentType().mapFailure(CodexError))

func isManifest*(mc: MultiCodec): ?!bool =
  success mc == ManifestCodec
@ -148,74 +154,77 @@ func verify*(self: Manifest): ?!void =
  ##

  if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
    return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
    return
      failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")

  return success()
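
The invariant verify enforces for protected manifests is blocksCount == steps * (ecK + ecM). A toy arithmetic check with made-up numbers (not values from this codebase):

let
  ecK = 4 # data blocks per encoding step (hypothetical)
  ecM = 2 # parity blocks per encoding step (hypothetical)
  steps = 8 # encoding steps (hypothetical)
assert steps * (ecK + ecM) == 48 # such a manifest must report 48 blocks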
func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
  self.treeCid.success

func `==`*(a, b: Manifest): bool =
  (a.treeCid == b.treeCid) and
  (a.datasetSize == b.datasetSize) and
  (a.blockSize == b.blockSize) and
  (a.version == b.version) and
  (a.hcodec == b.hcodec) and
  (a.codec == b.codec) and
  (a.protected == b.protected) and
  (if a.protected:
    (a.ecK == b.ecK) and
    (a.ecM == b.ecM) and
    (a.originalTreeCid == b.originalTreeCid) and
    (a.originalDatasetSize == b.originalDatasetSize) and
    (a.protectedStrategy == b.protectedStrategy) and
    (a.verifiable == b.verifiable) and
    (if a.verifiable:
      (a.verifyRoot == b.verifyRoot) and
      (a.slotRoots == b.slotRoots) and
      (a.cellSize == b.cellSize) and
      (a.verifiableStrategy == b.verifiableStrategy)
  (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
    (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
    (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
    (a.mimetype == b.mimetype) and (
      if a.protected:
        (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
          (a.originalDatasetSize == b.originalDatasetSize) and
          (a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
          (
            if a.verifiable:
              (a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
                (a.cellSize == b.cellSize) and (
                a.verifiableStrategy == b.verifiableStrategy
              )
    else:
      true)
              true
          )
    else:
      true)
        true
    )

func `$`*(self: Manifest): string =
  "treeCid: " & $self.treeCid &
  ", datasetSize: " & $self.datasetSize &
  ", blockSize: " & $self.blockSize &
  ", version: " & $self.version &
  ", hcodec: " & $self.hcodec &
  ", codec: " & $self.codec &
  ", protected: " & $self.protected &
  (if self.protected:
    ", ecK: " & $self.ecK &
    ", ecM: " & $self.ecM &
    ", originalTreeCid: " & $self.originalTreeCid &
    ", originalDatasetSize: " & $self.originalDatasetSize &
    ", verifiable: " & $self.verifiable &
    (if self.verifiable:
      ", verifyRoot: " & $self.verifyRoot &
      ", slotRoots: " & $self.slotRoots
    else:
      "")
  result =
    "treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
    $self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
    ", codec: " & $self.codec & ", protected: " & $self.protected

  if self.filename.isSome:
    result &= ", filename: " & $self.filename

  if self.mimetype.isSome:
    result &= ", mimetype: " & $self.mimetype

  result &= (
    if self.protected:
      ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
        $self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
        ", verifiable: " & $self.verifiable & (
        if self.verifiable:
          ", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
        else:
          ""
      )
    else:
      "")
      ""
  )

  return result

############################################################
# Constructors
############################################################

func new*(
  T: type Manifest,
  treeCid: Cid,
  blockSize: NBytes,
  datasetSize: NBytes,
  version: CidVersion = CIDv1,
  hcodec = Sha256HashCodec,
  codec = BlockCodec,
  protected = false): Manifest =

    T: type Manifest,
    treeCid: Cid,
    blockSize: NBytes,
    datasetSize: NBytes,
    version: CidVersion = CIDv1,
    hcodec = Sha256HashCodec,
    codec = BlockCodec,
    protected = false,
    filename: ?string = string.none,
    mimetype: ?string = string.none,
): Manifest =
  T(
    treeCid: treeCid,
    blockSize: blockSize,
@ -223,15 +232,19 @@ func new*(
    version: version,
    codec: codec,
    hcodec: hcodec,
    protected: protected)
    protected: protected,
    filename: filename,
    mimetype: mimetype,
  )
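
A hedged usage sketch of the extended constructor; the tree CID and sizes are placeholders, and the optional metadata uses the same Option sugar (string.none, .some) as the defaults above:

let manifest = Manifest.new(
  treeCid = treeCid, # a merkle root CID computed elsewhere
  blockSize = 65536.NBytes,
  datasetSize = 1048576.NBytes,
  filename = "report.pdf".some, # newly serialized optional field
  mimetype = "application/pdf".some,
)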
func new*(
  T: type Manifest,
  manifest: Manifest,
  treeCid: Cid,
  datasetSize: NBytes,
  ecK, ecM: int,
  strategy = SteppedStrategy): Manifest =
    T: type Manifest,
    manifest: Manifest,
    treeCid: Cid,
    datasetSize: NBytes,
    ecK, ecM: int,
    strategy = SteppedStrategy,
): Manifest =
  ## Create an erasure protected dataset from an
  ## unprotected one
  ##
@ -244,14 +257,16 @@ func new*(
    hcodec: manifest.hcodec,
    blockSize: manifest.blockSize,
    protected: true,
    ecK: ecK, ecM: ecM,
    ecK: ecK,
    ecM: ecM,
    originalTreeCid: manifest.treeCid,
    originalDatasetSize: manifest.datasetSize,
    protectedStrategy: strategy)
    protectedStrategy: strategy,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
  )

func new*(
  T: type Manifest,
  manifest: Manifest): Manifest =
func new*(T: type Manifest, manifest: Manifest): Manifest =
  ## Create an unprotected dataset from an
  ## erasure protected one
  ##
@ -263,22 +278,27 @@ func new*(
    codec: manifest.codec,
    hcodec: manifest.hcodec,
    blockSize: manifest.blockSize,
    protected: false)
    protected: false,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
  )

func new*(
  T: type Manifest,
  treeCid: Cid,
  datasetSize: NBytes,
  blockSize: NBytes,
  version: CidVersion,
  hcodec: MultiCodec,
  codec: MultiCodec,
  ecK: int,
  ecM: int,
  originalTreeCid: Cid,
  originalDatasetSize: NBytes,
  strategy = SteppedStrategy): Manifest =

    T: type Manifest,
    treeCid: Cid,
    datasetSize: NBytes,
    blockSize: NBytes,
    version: CidVersion,
    hcodec: MultiCodec,
    codec: MultiCodec,
    ecK: int,
    ecM: int,
    originalTreeCid: Cid,
    originalDatasetSize: NBytes,
    strategy = SteppedStrategy,
    filename: ?string = string.none,
    mimetype: ?string = string.none,
): Manifest =
  Manifest(
    treeCid: treeCid,
    datasetSize: datasetSize,
@ -291,26 +311,30 @@ func new*(
    ecM: ecM,
    originalTreeCid: originalTreeCid,
    originalDatasetSize: originalDatasetSize,
    protectedStrategy: strategy)
    protectedStrategy: strategy,
    filename: filename,
    mimetype: mimetype,
  )

func new*(
  T: type Manifest,
  manifest: Manifest,
  verifyRoot: Cid,
  slotRoots: openArray[Cid],
  cellSize = DefaultCellSize,
  strategy = LinearStrategy): ?!Manifest =
    T: type Manifest,
    manifest: Manifest,
    verifyRoot: Cid,
    slotRoots: openArray[Cid],
    cellSize = DefaultCellSize,
    strategy = LinearStrategy,
): ?!Manifest =
  ## Create a verifiable dataset from a
  ## protected one
  ##

  if not manifest.protected:
    return failure newException(
      CodexError, "Can create verifiable manifest only from protected manifest.")
      CodexError, "Can create verifiable manifest only from protected manifest."
    )

  if slotRoots.len != manifest.numSlots:
    return failure newException(
      CodexError, "Wrong number of slot roots.")
    return failure newException(CodexError, "Wrong number of slot roots.")

  success Manifest(
    treeCid: manifest.treeCid,
@ -329,11 +353,12 @@ func new*(
    verifyRoot: verifyRoot,
    slotRoots: @slotRoots,
    cellSize: cellSize,
    verifiableStrategy: strategy)
    verifiableStrategy: strategy,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
  )

func new*(
  T: type Manifest,
  data: openArray[byte]): ?!Manifest =
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
  ## Create a manifest instance from given data
  ##

295 codex/market.nim
@ -1,5 +1,4 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
@ -18,17 +17,20 @@ export periods
type
  Market* = ref object of RootObj
  MarketError* = object of CodexError
  SlotStateMismatchError* = object of MarketError
  SlotReservationNotAllowedError* = object of MarketError
  ProofInvalidError* = object of MarketError
  Subscription* = ref object of RootObj
  OnRequest* = proc(id: RequestId,
    ask: StorageAsk,
    expiry: UInt256) {.gcsafe, upraises:[].}
  OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
  OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
  OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnSlotReservationsFull* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].}
  OnRequest* =
    proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
  OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
  OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
  OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
  OnSlotReservationsFull* =
    proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
  ProofChallenge* = array[32, byte]

  # Marketplace events -- located here due to the Market abstraction
@ -36,38 +38,68 @@ type
  StorageRequested* = object of MarketplaceEvent
    requestId*: RequestId
    ask*: StorageAsk
    expiry*: UInt256
    expiry*: uint64

  SlotFilled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: UInt256
    slotIndex*: uint64

  SlotFreed* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: UInt256
    slotIndex*: uint64

  SlotReservationsFull* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: UInt256
    slotIndex*: uint64

  RequestFulfilled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId

  RequestCancelled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId

  RequestFailed* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId

  ProofSubmitted* = object of MarketplaceEvent
    id*: SlotId
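
The callback types above move from UInt256 slot indices and upraises to uint64 and plain raises: []. A hedged sketch of a conforming handler (the echo stands in for real handling):

proc onSlotFilled(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].} =
  # must not raise: the pragma promises the market a non-throwing callback
  echo "slot ", slotIndex, " filled for request ", requestId

# subscription = await market.subscribeSlotFilled(onSlotFilled)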
method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
method loadConfig*(
    market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method getSigner*(market: Market): Future[Address] {.base, async.} =
method getZkeyHash*(
    market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
method getSigner*(
    market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method proofTimeout*(market: Market): Future[UInt256] {.base, async.} =
method periodicity*(
    market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
method proofTimeout*(
    market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method repairRewardPercentage*(
    market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
  raiseAssert("not implemented")

method proofDowntime*(
    market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
@ -78,8 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
  let pntr = await market.getPointer(slotId)
  return pntr < downtime

method requestStorage*(market: Market,
    request: StorageRequest) {.base, async.} =
method requestStorage*(
    market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@ -88,163 +121,193 @@ method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
  raiseAssert("not implemented")

method getRequest*(market: Market,
    id: RequestId):
    Future[?StorageRequest] {.base, async.} =
method getRequest*(
    market: Market, id: RequestId
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method requestState*(market: Market,
    requestId: RequestId): Future[?RequestState] {.base, async.} =
method requestState*(
    market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
  raiseAssert("not implemented")

method slotState*(market: Market,
    slotId: SlotId): Future[SlotState] {.base, async.} =
method slotState*(
    market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getRequestEnd*(market: Market,
    id: RequestId): Future[SecondsSince1970] {.base, async.} =
method getRequestEnd*(
    market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
  raiseAssert("not implemented")

method requestExpiresAt*(market: Market,
    id: RequestId): Future[SecondsSince1970] {.base, async.} =
method requestExpiresAt*(
    market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
  raiseAssert("not implemented")

method getHost*(market: Market,
    requestId: RequestId,
    slotIndex: UInt256): Future[?Address] {.base, async.} =
method getHost*(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getActiveSlot*(
    market: Market,
    slotId: SlotId): Future[?Slot] {.base, async.} =

method currentCollateral*(
    market: Market, slotId: SlotId
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
  raiseAssert("not implemented")

method fillSlot*(market: Market,
    requestId: RequestId,
    slotIndex: UInt256,
    proof: Groth16Proof,
    collateral: UInt256) {.base, async.} =
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
  raiseAssert("not implemented")

method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
method fillSlot*(
    market: Market,
    requestId: RequestId,
    slotIndex: uint64,
    proof: Groth16Proof,
    collateral: UInt256,
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method withdrawFunds*(market: Market,
    requestId: RequestId) {.base, async.} =
method freeSlot*(
    market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method subscribeRequests*(market: Market,
    callback: OnRequest):
    Future[Subscription] {.base, async.} =
method withdrawFunds*(
    market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method isProofRequired*(market: Market,
    id: SlotId): Future[bool] {.base, async.} =
method subscribeRequests*(
    market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method willProofBeRequired*(market: Market,
    id: SlotId): Future[bool] {.base, async.} =
method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} =
method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

method submitProof*(market: Market,
    id: SlotId,
    proof: Groth16Proof) {.base, async.} =
method getChallenge*(
    market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
  raiseAssert("not implemented")

method markProofAsMissing*(market: Market,
    id: SlotId,
    period: Period) {.base, async.} =
method submitProof*(
    market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canProofBeMarkedAsMissing*(market: Market,
    id: SlotId,
    period: Period): Future[bool] {.base, async.} =
method markProofAsMissing*(
    market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canMarkProofAsMissing*(
    market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method reserveSlot*(
    market: Market,
    requestId: RequestId,
    slotIndex: UInt256) {.base, async.} =

    market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canReserveSlot*(
    market: Market,
    requestId: RequestId,
    slotIndex: UInt256): Future[bool] {.base, async.} =

    market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

method subscribeFulfillment*(market: Market,
    callback: OnFulfillment):
    Future[Subscription] {.base, async.} =
method subscribeFulfillment*(
    market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeFulfillment*(market: Market,
    requestId: RequestId,
    callback: OnFulfillment):
    Future[Subscription] {.base, async.} =
method subscribeFulfillment*(
    market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotFilled*(market: Market,
    callback: OnSlotFilled):
    Future[Subscription] {.base, async.} =
method subscribeSlotFilled*(
    market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotFilled*(market: Market,
    requestId: RequestId,
    slotIndex: UInt256,
    callback: OnSlotFilled):
    Future[Subscription] {.base, async.} =
method subscribeSlotFilled*(
    market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotFreed*(market: Market,
    callback: OnSlotFreed):
    Future[Subscription] {.base, async.} =
method subscribeSlotFreed*(
    market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotReservationsFull*(
    market: Market,
    callback: OnSlotReservationsFull): Future[Subscription] {.base, async.} =

    market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestCancelled*(market: Market,
    callback: OnRequestCancelled):
    Future[Subscription] {.base, async.} =
method subscribeRequestCancelled*(
    market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestCancelled*(market: Market,
    requestId: RequestId,
    callback: OnRequestCancelled):
    Future[Subscription] {.base, async.} =
method subscribeRequestCancelled*(
    market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestFailed*(market: Market,
    callback: OnRequestFailed):
    Future[Subscription] {.base, async.} =
method subscribeRequestFailed*(
    market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestFailed*(market: Market,
    requestId: RequestId,
    callback: OnRequestFailed):
    Future[Subscription] {.base, async.} =
method subscribeRequestFailed*(
    market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeProofSubmission*(market: Market,
    callback: OnProofSubmitted):
    Future[Subscription] {.base, async.} =
method subscribeProofSubmission*(
    market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
method unsubscribe*(subscription: Subscription) {.base, async.} =
  raiseAssert("not implemented")

method queryPastEvents*[T: MarketplaceEvent](
    market: Market,
    _: type T,
    blocksAgo: int): Future[seq[T]] {.base, async.} =
method queryPastSlotFilledEvents*(
    market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
    market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
    market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastStorageRequestedEvents*(
    market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastStorageRequestedEvents*(
    market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
  raiseAssert("not implemented")

method slotCollateral*(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method slotCollateral*(
    market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
  raiseAssert("not implemented")

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises

push: {.upraises: [].}
{.push raises: [], gcsafe.}

import pkg/libp2p
import pkg/questionable
@ -26,11 +24,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint

proc encode*(self: CodexTree): seq[byte] =
  var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
  var pb = initProtoBuffer()
  pb.write(1, self.mcodec.uint64)
  pb.write(2, self.leavesCount.uint64)
  for node in self.nodes:
    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
    var nodesPb = initProtoBuffer()
    nodesPb.write(1, node)
    nodesPb.finish()
    pb.write(3, nodesPb)
@ -39,11 +37,11 @@ proc encode*(self: CodexTree): seq[byte] =
  pb.buffer

proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
  var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
  var pb = initProtoBuffer(data)
  var mcodecCode: uint64
  var leavesCount: uint64
  discard ? pb.getField(1, mcodecCode).mapFailure
  discard ? pb.getField(2, leavesCount).mapFailure
  discard ?pb.getField(1, mcodecCode).mapFailure
  discard ?pb.getField(2, leavesCount).mapFailure

  let mcodec = MultiCodec.codec(mcodecCode.int)
  if mcodec == InvalidMultiCodec:
@ -53,22 +51,22 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
    nodesBuff: seq[seq[byte]]
    nodes: seq[ByteHash]

  if ? pb.getRepeatedField(3, nodesBuff).mapFailure:
  if ?pb.getRepeatedField(3, nodesBuff).mapFailure:
    for nodeBuff in nodesBuff:
      var node: ByteHash
      discard ? initProtoBuffer(nodeBuff).getField(1, node).mapFailure
      discard ?initProtoBuffer(nodeBuff).getField(1, node).mapFailure
      nodes.add node

  CodexTree.fromNodes(mcodec, nodes, leavesCount.int)

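Together these two procs define CodexTree's wire format: protobuf field 1 carries the multicodec, field 2 the leaf count, and repeated field 3 the node hashes. A hedged round-trip sketch, assuming a tree built earlier with CodexTree.init and questionable's without/=? syntax:

let encoded = tree.encode() # seq[byte], protobuf-framed
without decoded =? CodexTree.decode(encoded), err:
  raise newException(CodexError, "decode failed: " & err.msg)
assert decoded.leavesCount == tree.leavesCount
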
proc encode*(self: CodexProof): seq[byte] =
  var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
  var pb = initProtoBuffer()
  pb.write(1, self.mcodec.uint64)
  pb.write(2, self.index.uint64)
  pb.write(3, self.nleaves.uint64)

  for node in self.path:
    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
    var nodesPb = initProtoBuffer()
    nodesPb.write(1, node)
    nodesPb.finish()
    pb.write(4, nodesPb)
@ -77,36 +75,33 @@ proc encode*(self: CodexProof): seq[byte] =
  pb.buffer

proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
  var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
  var pb = initProtoBuffer(data)
  var mcodecCode: uint64
  var index: uint64
  var nleaves: uint64
  discard ? pb.getField(1, mcodecCode).mapFailure
  discard ?pb.getField(1, mcodecCode).mapFailure

  let mcodec = MultiCodec.codec(mcodecCode.int)
  if mcodec == InvalidMultiCodec:
    return failure("Invalid MultiCodec code " & $mcodecCode)

  discard ? pb.getField(2, index).mapFailure
  discard ? pb.getField(3, nleaves).mapFailure
  discard ?pb.getField(2, index).mapFailure
  discard ?pb.getField(3, nleaves).mapFailure

  var
    nodesBuff: seq[seq[byte]]
    nodes: seq[ByteHash]

  if ? pb.getRepeatedField(4, nodesBuff).mapFailure:
  if ?pb.getRepeatedField(4, nodesBuff).mapFailure:
    for nodeBuff in nodesBuff:
      var node: ByteHash
      let nodePb = initProtoBuffer(nodeBuff)
      discard ? nodePb.getField(1, node).mapFailure
      discard ?nodePb.getField(1, node).mapFailure
      nodes.add node

  CodexProof.init(mcodec, index.int, nleaves.int, nodes)

proc fromJson*(
  _: type CodexProof,
  json: JsonNode
): ?!CodexProof =
proc fromJson*(_: type CodexProof, json: JsonNode): ?!CodexProof =
  expectJsonKind(Cid, JString, json)
  var bytes: seq[byte]
  try:
@ -116,4 +111,5 @@ proc fromJson*(

  CodexProof.decode(bytes)

func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode())
func `%`*(proof: CodexProof): JsonNode =
  %byteutils.toHex(proof.encode())

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -15,7 +15,7 @@ import std/sequtils
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]

import pkg/constantine/hashes
import ../../utils
import ../../rng
import ../../errors
@ -32,10 +32,10 @@ logScope:

type
  ByteTreeKey* {.pure.} = enum
    KeyNone = 0x0.byte
    KeyBottomLayer = 0x1.byte
    KeyOdd = 0x2.byte
    KeyOddAndBottomLayer = 0x3.byte
    KeyNone = 0x0.byte
    KeyBottomLayer = 0x1.byte
    KeyOdd = 0x2.byte
    KeyOddAndBottomLayer = 0x3.byte

  ByteHash* = seq[byte]
  ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
@ -47,26 +47,10 @@ type
  CodexProof* = ref object of ByteProof
    mcodec*: MultiCodec

func mhash*(mcodec: MultiCodec): ?!MHash =
  let
    mhash = CodeHashes.getOrDefault(mcodec)

  if isNil(mhash.coder):
    return failure "Invalid multihash codec"

  success mhash

func digestSize*(self: (CodexTree or CodexProof)): int =
  ## Number of leaves
  ##

  self.mhash.size

func getProof*(self: CodexTree, index: int): ?!CodexProof =
  var
    proof = CodexProof(mcodec: self.mcodec)
  var proof = CodexProof(mcodec: self.mcodec)

  ? self.getProof(index, proof)
  ?self.getProof(index, proof)

  success proof

@ -78,137 +62,113 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
    rootBytes = root.digestBytes
    leafBytes = leaf.digestBytes

  if self.mcodec != root.mcodec or
    self.mcodec != leaf.mcodec:
  if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec:
    return failure "Hash codec mismatch"

  if rootBytes.len != root.size and
    leafBytes.len != leaf.size:
  if rootBytes.len != root.size and leafBytes.len != leaf.size:
    return failure "Invalid hash length"

  self.verify(leafBytes, rootBytes)

func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
  self.verify(? leaf.mhash.mapFailure, ? leaf.mhash.mapFailure)
  self.verify(?leaf.mhash.mapFailure, ?root.mhash.mapFailure)

proc rootCid*(
  self: CodexTree,
  version = CIDv1,
  dataCodec = DatasetRootCodec): ?!Cid =

  if (? self.root).len == 0:
proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid =
  if (?self.root).len == 0:
    return failure "Empty root"

  let
    mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
  let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure

  Cid.init(version, DatasetRootCodec, mhash).mapFailure

func getLeafCid*(
  self: CodexTree,
  i: Natural,
  version = CIDv1,
  dataCodec = BlockCodec): ?!Cid =

    self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec
): ?!Cid =
  if i >= self.leavesCount:
    return failure "Invalid leaf index " & $i

  let
    leaf = self.leaves[i]
    mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure
    mhash = ?MultiHash.init($self.mcodec, leaf).mapFailure

  Cid.init(version, dataCodec, mhash).mapFailure

proc `$`*(self: CodexTree): string =
  let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
  "CodexTree(" &
    " root: " & root &
    ", leavesCount: " & $self.leavesCount &
    ", levels: " & $self.levels &
    ", mcodec: " & $self.mcodec & " )"
  let root =
    if self.root.isOk:
      byteutils.toHex(self.root.get)
    else:
      "none"
  "CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " &
    $self.levels & ", mcodec: " & $self.mcodec & " )"

proc `$`*(self: CodexProof): string =
  "CodexProof(" &
    " nleaves: " & $self.nleaves &
    ", index: " & $self.index &
    ", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
    ", mcodec: " & $self.mcodec & " )"
  "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
    $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"

func compress*(
  x, y: openArray[byte],
  key: ByteTreeKey,
  mhash: MHash): ?!ByteHash =
func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
  ## Compress two hashes
  ##

  var digest = newSeq[byte](mhash.size)
  mhash.coder(@x & @y & @[ key.byte ], digest)
  success digest
  let input = @x & @y & @[key.byte]
  let digest = ?MultiHash.digest(codec, input).mapFailure
  success digest.digestBytes

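The rewritten compress routes through MultiHash.digest instead of calling an MHash coder directly; the key byte appended after x & y is what domain-separates bottom-layer, odd, and interior nodes. A toy illustration of that separation with made-up bytes (the digests differ because the preimages differ):

let
  x = @[1'u8, 2, 3]
  y = @[4'u8, 5, 6]
# same children, different keys => different preimages, hence different hashes
assert (x & y & @[ByteTreeKey.KeyNone.byte]) !=
  (x & y & @[ByteTreeKey.KeyBottomLayer.byte])
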
func init*(
  _: type CodexTree,
  mcodec: MultiCodec = Sha256HashCodec,
  leaves: openArray[ByteHash]): ?!CodexTree =

    _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let
    mhash = ? mcodec.mhash()
    compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
      compress(x, y, key, mhash)
    Zero: ByteHash = newSeq[byte](mhash.size)
      compress(x, y, key, mcodec)
    digestSize = ?mcodec.digestSize.mapFailure
    Zero: ByteHash = newSeq[byte](digestSize)

  if mhash.size != leaves[0].len:
  if digestSize != leaves[0].len:
    return failure "Invalid hash length"

  var
    self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
  var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)

  self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
  self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
  success self

func init*(
  _: type CodexTree,
  leaves: openArray[MultiHash]): ?!CodexTree =

func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let
    mcodec = leaves[0].mcodec
    leaves = leaves.mapIt( it.digestBytes )
    leaves = leaves.mapIt(it.digestBytes)

  CodexTree.init(mcodec, leaves)

func init*(
  _: type CodexTree,
  leaves: openArray[Cid]): ?!CodexTree =
func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let
    mcodec = (? leaves[0].mhash.mapFailure).mcodec
    leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes )
    mcodec = (?leaves[0].mhash.mapFailure).mcodec
    leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes)

  CodexTree.init(mcodec, leaves)

proc fromNodes*(
  _: type CodexTree,
  mcodec: MultiCodec = Sha256HashCodec,
  nodes: openArray[ByteHash],
  nleaves: int): ?!CodexTree =

    _: type CodexTree,
    mcodec: MultiCodec = Sha256HashCodec,
    nodes: openArray[ByteHash],
    nleaves: int,
): ?!CodexTree =
  if nodes.len == 0:
    return failure "Empty nodes"

  let
    mhash = ? mcodec.mhash()
    Zero = newSeq[byte](mhash.size)
    digestSize = ?mcodec.digestSize.mapFailure
    Zero = newSeq[byte](digestSize)
    compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
      compress(x, y, key, mhash)
      compress(x, y, key, mcodec)

  if mhash.size != nodes[0].len:
  if digestSize != nodes[0].len:
    return failure "Invalid hash length"

  var
@ -217,34 +177,34 @@ proc fromNodes*(
    pos = 0

  while pos < nodes.len:
    self.layers.add( nodes[pos..<(pos + layer)] )
    self.layers.add(nodes[pos ..< (pos + layer)])
    pos += layer
    layer = divUp(layer, 2)

  let
    index = Rng.instance.rand(nleaves - 1)
    proof = ? self.getProof(index)
    proof = ?self.getProof(index)

  if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
  if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
    return failure "Unable to verify tree built from nodes"

  success self

func init*(
  _: type CodexProof,
  mcodec: MultiCodec = Sha256HashCodec,
  index: int,
  nleaves: int,
  nodes: openArray[ByteHash]): ?!CodexProof =

    _: type CodexProof,
    mcodec: MultiCodec = Sha256HashCodec,
    index: int,
    nleaves: int,
    nodes: openArray[ByteHash],
): ?!CodexProof =
  if nodes.len == 0:
    return failure "Empty nodes"

  let
    mhash = ? mcodec.mhash()
    Zero = newSeq[byte](mhash.size)
    digestSize = ?mcodec.digestSize.mapFailure
    Zero = newSeq[byte](digestSize)
    compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
      compress(x, y, key, mhash)
      compress(x, y, key, mcodec)

  success CodexProof(
    compress: compressor,
@ -252,4 +212,5 @@ func init*(
    mcodec: mcodec,
    index: index,
    nleaves: nleaves,
    path: @nodes)
    path: @nodes,
  )

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -16,19 +16,19 @@ import pkg/questionable/results
import ../errors

type
  CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
  CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}

  MerkleTree*[H, K] = ref object of RootObj
    layers* : seq[seq[H]]
    layers*: seq[seq[H]]
    compress*: CompressFn[H, K]
    zero* : H
    zero*: H

  MerkleProof*[H, K] = ref object of RootObj
    index* : int # linear index of the leaf, starting from 0
    path* : seq[H] # order: from the bottom to the top
    nleaves* : int # number of leaves in the tree (=size of input)
    compress*: CompressFn[H, K] # compress function
    zero* : H # zero value
    index*: int # linear index of the leaf, starting from 0
    path*: seq[H] # order: from the bottom to the top
    nleaves*: int # number of leaves in the tree (=size of input)
    compress*: CompressFn[H, K] # compress function
    zero*: H # zero value

func depth*[H, K](self: MerkleTree[H, K]): int =
  return self.layers.len - 1
@ -59,36 +59,38 @@ func root*[H, K](self: MerkleTree[H, K]): ?!H =
  return success last[0]

func getProof*[H, K](
  self: MerkleTree[H, K],
  index: int,
  proof: MerkleProof[H, K]): ?!void =
  let depth = self.depth
    self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
): ?!void =
  let depth = self.depth
  let nleaves = self.leavesCount

  if not (index >= 0 and index < nleaves):
    return failure "index out of bounds"

  var path : seq[H] = newSeq[H](depth)
  var path: seq[H] = newSeq[H](depth)
  var k = index
  var m = nleaves
  for i in 0..<depth:
  for i in 0 ..< depth:
    let j = k xor 1
    path[i] = if (j < m): self.layers[i][j] else: self.zero
    k = k shr 1
    path[i] =
      if (j < m):
        self.layers[i][j]
      else:
        self.zero
    k = k shr 1
    m = (m + 1) shr 1

  proof.index = index
  proof.path = path
  proof.path = path
  proof.nleaves = nleaves
  proof.compress = self.compress

  success()

func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
  var
    proof = MerkleProof[H, K]()
  var proof = MerkleProof[H, K]()

  ? self.getProof(index, proof)
  ?self.getProof(index, proof)

  success proof

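The proof path above pairs each node with its sibling (j = k xor 1) and climbs one level per iteration (k = k shr 1), substituting the zero element when the sibling falls off an odd-sized layer. A standalone trace of just the index arithmetic for leaf 5 of an 8-leaf tree (illustration only; no tree is needed):

var k = 5
var m = 8
for level in 0 ..< 3: # depth of an 8-leaf tree
  let j = k xor 1 # sibling index: 4, then 3, then 0
  echo "level ", level, ": node ", k, ", sibling ", j, ", layer size ", m
  k = k shr 1
  m = (m + 1) shr 1
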
@ -100,41 +102,39 @@ func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
    bottomFlag = K.KeyBottomLayer

  for p in proof.path:
    let oddIndex : bool = (bitand(j,1) != 0)
    let oddIndex: bool = (bitand(j, 1) != 0)
    if oddIndex:
      # the index of the child is odd, so the node itself can't be odd (a bit counterintuitive, yeah :)
      h = ? proof.compress( p, h, bottomFlag )
      h = ?proof.compress(p, h, bottomFlag)
    else:
      if j == m - 1:
        # single child => odd node
        h = ? proof.compress( h, p, K(bottomFlag.ord + 2) )
        h = ?proof.compress(h, p, K(bottomFlag.ord + 2))
      else:
        # even node
        h = ? proof.compress( h , p, bottomFlag )
        h = ?proof.compress(h, p, bottomFlag)
    bottomFlag = K.KeyNone
    j = j shr 1
    m = (m+1) shr 1
    j = j shr 1
    m = (m + 1) shr 1

  return success h

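reconstructRoot folds the stored path back onto the leaf, re-deriving the key flags from the index parity, and verify (next) just compares the result with the expected root. A minimal sketch of the round trip, written as if inside a ?!void-returning proc so the ? operator can propagate failures; the leaves value is assumed:

let
  tree = ?CodexTree.init(Sha256HashCodec, leaves)
  proof = ?tree.getProof(3)
assert ?proof.verify(leaves[3], ?tree.root) # holds for a consistent tree
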
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
  success bool(root == ? proof.reconstructRoot(leaf))
  success bool(root == ?proof.reconstructRoot(leaf))

func merkleTreeWorker*[H, K](
  self: MerkleTree[H, K],
  xs: openArray[H],
  isBottomLayer: static bool): ?!seq[seq[H]] =

    self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
): ?!seq[seq[H]] =
  let a = low(xs)
  let b = high(xs)
  let m = b - a + 1

  when not isBottomLayer:
    if m == 1:
      return success @[ @xs ]
      return success @[@xs]

  let halfn: int = m div 2
  let n : int = 2 * halfn
  let halfn: int = m div 2
  let n: int = 2 * halfn
  let isOdd: bool = (n != m)

  var ys: seq[H]
@ -143,11 +143,11 @@ func merkleTreeWorker*[H, K](
  else:
    ys = newSeq[H](halfn + 1)

  for i in 0..<halfn:
  for i in 0 ..< halfn:
    const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
    ys[i] = ? self.compress( xs[a + 2 * i], xs[a + 2 * i + 1], key = key )
    ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key)
  if isOdd:
    const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
    ys[halfn] = ? self.compress( xs[n], self.zero, key = key )
    ys[halfn] = ?self.compress(xs[n], self.zero, key = key)

  success @[ @xs ] & ? self.merkleTreeWorker(ys, isBottomLayer = false)
  success @[@xs] & ?self.merkleTreeWorker(ys, isBottomLayer = false)

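merkleTreeWorker halves the layer width at each recursion, hashing the lone element of an odd layer against zero, so the layer sizes shrink like divUp(m, 2) down to a single root. A quick standalone check of that shape for 5 leaves:

var widths: seq[int] = @[]
var m = 5
while m > 1:
  widths.add m
  m = (m + 1) div 2 # same rounding as divUp(m, 2)
widths.add 1
assert widths == @[5, 3, 2, 1] # layer sizes from leaves up to the root
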
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -24,10 +24,10 @@ import ./merkletree
export merkletree, poseidon2

const
  KeyNoneF = F.fromhex("0x0")
  KeyBottomLayerF = F.fromhex("0x1")
  KeyOddF = F.fromhex("0x2")
  KeyOddAndBottomLayerF = F.fromhex("0x3")
  KeyNoneF = F.fromHex("0x0")
  KeyBottomLayerF = F.fromHex("0x1")
  KeyOddF = F.fromHex("0x2")
  KeyOddAndBottomLayerF = F.fromHex("0x3")

  Poseidon2Zero* = zero

@ -35,7 +35,7 @@ type
  Bn254Fr* = F
  Poseidon2Hash* = Bn254Fr

  PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
  PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
    KeyNone
    KeyBottomLayer
    KeyOdd
@ -46,65 +46,50 @@ type

proc `$`*(self: Poseidon2Tree): string =
  let root = if self.root.isOk: self.root.get.toHex else: "none"
  "Poseidon2Tree(" &
    " root: " & root &
    ", leavesCount: " & $self.leavesCount &
  "Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
    ", levels: " & $self.levels & " )"

proc `$`*(self: Poseidon2Proof): string =
  "Poseidon2Proof(" &
    " nleaves: " & $self.nleaves &
    ", index: " & $self.index &
    ", path: " & $self.path.mapIt( it.toHex ) & " )"
  "Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
    ", path: " & $self.path.mapIt(it.toHex) & " )"

func toArray32*(bytes: openArray[byte]): array[32, byte] =
  result[0..<bytes.len] = bytes[0..<bytes.len]
  result[0 ..< bytes.len] = bytes[0 ..< bytes.len]

converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
  case key:
  case key
  of KeyNone: KeyNoneF
  of KeyBottomLayer: KeyBottomLayerF
  of KeyOdd: KeyOddF
  of KeyOddAndBottomLayer: KeyOddAndBottomLayerF

func init*(
  _: type Poseidon2Tree,
  leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =

func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let
    compressor = proc(
      x, y: Poseidon2Hash,
      key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
      success compress( x, y, key.toKey )
  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  var
    self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
  var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)

  self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
  self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
  success self

func init*(
  _: type Poseidon2Tree,
  leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
  Poseidon2Tree.init(
    leaves.mapIt( Poseidon2Hash.fromBytes(it) ))
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
  Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))

proc fromNodes*(
  _: type Poseidon2Tree,
  nodes: openArray[Poseidon2Hash],
  nleaves: int): ?!Poseidon2Tree =

    _: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
  if nodes.len == 0:
    return failure "Empty nodes"

  let
    compressor = proc(
      x, y: Poseidon2Hash,
      key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
      success compress( x, y, key.toKey )
  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  var
    self = Poseidon2Tree(compress: compressor, zero: zero)
@ -112,37 +97,34 @@ proc fromNodes*(
    pos = 0

  while pos < nodes.len:
    self.layers.add( nodes[pos..<(pos + layer)] )
    self.layers.add(nodes[pos ..< (pos + layer)])
    pos += layer
    layer = divUp(layer, 2)

  let
    index = Rng.instance.rand(nleaves - 1)
    proof = ? self.getProof(index)
    proof = ?self.getProof(index)

  if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
  if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
    return failure "Unable to verify tree built from nodes"

  success self

func init*(
  _: type Poseidon2Proof,
  index: int,
  nleaves: int,
  nodes: openArray[Poseidon2Hash]): ?!Poseidon2Proof =

    _: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
  if nodes.len == 0:
    return failure "Empty nodes"

  let
    compressor = proc(
      x, y: Poseidon2Hash,
      key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
      success compress( x, y, key.toKey )
  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  success Poseidon2Proof(
    compress: compressor,
    zero: Poseidon2Zero,
    index: index,
    nleaves: nleaves,
    path: @nodes)
    path: @nodes,
  )

11 codex/multicodec_exts.nim (new file)
@ -0,0 +1,11 @@
const CodecExts = [
  ("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
  ("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
  ("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress
  ("codex-manifest", 0xCD01),
  ("codex-block", 0xCD02),
  ("codex-root", 0xCD03),
  ("codex-slot-root", 0xCD04),
  ("codex-proving-root", 0xCD05),
  ("codex-slot-cell", 0xCD06),
]
40 codex/multihash_exts.nim (new file)
@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2

proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
  # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
  # See: https://github.com/logos-storage/logos-storage-nim/issues/1162
  if len(output) > 0:
    let digest = hashes.sha256.hash(data)
    copyMem(addr output[0], addr digest[0], 32)

proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.Sponge.digest(data).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

const Sha2256MultiHash* = MHash(
  mcodec: multiCodec("sha2-256"),
  size: sha256.sizeDigest,
  coder: sha2_256hash_constantine,
)
const HashExts = [
  # override sha2-256 hash function
  Sha2256MultiHash,
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
    size: 32,
    coder: poseidon2_sponge_rate2,
  ),
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
    size: 32,
    coder: poseidon2_merkle_2kb_sponge,
  ),
]
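
Once these coders are registered, the poseidon2 codecs and the Constantine-backed sha2-256 override are reachable through the ordinary MultiHash API. A hedged sketch (the registration step itself is not shown in this hunk, so treat it as assumed; written as if inside a ?!-returning proc):

let data = @[0'u8, 1, 2, 3]
let mh = ?MultiHash.digest("sha2-256", data).mapFailure
assert mh.size == 32 # sha256.sizeDigest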
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,16 +9,17 @@

const
  # Namespaces
  CodexMetaNamespace* = "meta" # meta info stored here
  CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
  CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
  CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
  CodexMetaNamespace* = "meta" # meta info stored here
  CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
  CodexBlockTotalNamespace* = CodexMetaNamespace & "/total"
    # number of blocks in the repo
  CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
  CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
  CodexBlocksTtlNamespace* = # Cid TTL
  CodexBlocksTtlNamespace* = # Cid TTL
    CodexMetaNamespace & "/ttl"
  CodexBlockProofNamespace* = # Cid and Proof
  CodexBlockProofNamespace* = # Cid and Proof
    CodexMetaNamespace & "/proof"
  CodexDhtNamespace* = "dht" # Dht namespace
  CodexDhtProvidersNamespace* = # Dht providers namespace
  CodexDhtNamespace* = "dht" # Dht namespace
  CodexDhtProvidersNamespace* = # Dht providers namespace
    CodexDhtNamespace & "/providers"
  CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
  CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace

432 codex/nat.nim (new file)
@ -0,0 +1,432 @@
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import
  std/[options, os, strutils, times, net, atomics],
  stew/[objects],
  nat_traversal/[miniupnpc, natpmp],
  json_serialization/std/net,
  results

import pkg/chronos
import pkg/chronicles
import pkg/libp2p

import ./utils
import ./utils/natutils
import ./utils/addrutils

const
  UPNP_TIMEOUT = 200 # ms
  PORT_MAPPING_INTERVAL = 20 * 60 # seconds
  NATPMP_LIFETIME = 60 * 60 # in seconds, must be longer than PORT_MAPPING_INTERVAL

type PortMappings* = object
  internalTcpPort: Port
  externalTcpPort: Port
  internalUdpPort: Port
  externalUdpPort: Port
  description: string

type PortMappingArgs =
  tuple[strategy: NatStrategy, tcpPort, udpPort: Port, description: string]

type NatConfig* = object
  case hasExtIp*: bool
  of true: extIp*: IpAddress
  of false: nat*: NatStrategy
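
NatConfig is a case object: a node either knows its external IP up front or names a traversal strategy, never both. A small sketch of constructing each variant (the address is a placeholder from the documentation range):

let
  fixed = NatConfig(hasExtIp: true, extIp: parseIpAddress("203.0.113.7"))
  discovered = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)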
var
  upnp {.threadvar.}: Miniupnp
  npmp {.threadvar.}: NatPmp
  strategy = NatStrategy.NatNone
  natClosed: Atomic[bool]
  extIp: Option[IpAddress]
  activeMappings: seq[PortMappings]
  natThreads: seq[Thread[PortMappingArgs]] = @[]

logScope:
  topics = "nat"

type PrefSrcStatus = enum
  NoRoutingInfo
  PrefSrcIsPublic
  PrefSrcIsPrivate
  BindAddressIsPublic
  BindAddressIsPrivate

## Also does threadvar initialisation.
## Must be called before redirectPorts() in each thread.
proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] =
  var externalIP: IpAddress

  if natStrategy == NatStrategy.NatAny or natStrategy == NatStrategy.NatUpnp:
    if upnp == nil:
      upnp = newMiniupnp()

    upnp.discoverDelay = UPNP_TIMEOUT
    let dres = upnp.discover()
    if dres.isErr:
      debug "UPnP", msg = dres.error
    else:
      var
        msg: cstring
        canContinue = true
      case upnp.selectIGD()
      of IGDNotFound:
        msg = "Internet Gateway Device not found. Giving up."
        canContinue = false
      of IGDFound:
        msg = "Internet Gateway Device found."
      of IGDNotConnected:
        msg = "Internet Gateway Device found but it's not connected. Trying anyway."
      of NotAnIGD:
        msg =
          "Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
      of IGDIpNotRoutable:
        msg =
          "Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
      if not quiet:
        debug "UPnP", msg
      if canContinue:
        let ires = upnp.externalIPAddress()
        if ires.isErr:
          debug "UPnP", msg = ires.error
        else:
          # if we got this far, UPnP is working and we don't need to try NAT-PMP
          try:
            externalIP = parseIpAddress(ires.value)
            strategy = NatStrategy.NatUpnp
            return some(externalIP)
          except ValueError as e:
            error "parseIpAddress() exception", err = e.msg
            return

  if natStrategy == NatStrategy.NatAny or natStrategy == NatStrategy.NatPmp:
    if npmp == nil:
      npmp = newNatPmp()
    let nres = npmp.init()
    if nres.isErr:
      debug "NAT-PMP", msg = nres.error
    else:
      let nires = npmp.externalIPAddress()
      if nires.isErr:
        debug "NAT-PMP", msg = nires.error
      else:
        try:
          externalIP = parseIpAddress($(nires.value))
          strategy = NatStrategy.NatPmp
          return some(externalIP)
        except ValueError as e:
          error "parseIpAddress() exception", err = e.msg
          return

# This queries the routing table to get the "preferred source" attribute and
# checks if it's a public IP. If so, then it's our public IP.
#
# Furthermore, we check if the bind address (user provided, or a "0.0.0.0"
# default) is a public IP. That's a long shot, because code paths involving a
# user-provided bind address are not supposed to get here.
proc getRoutePrefSrc(bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
  let bindAddress = initTAddress(bindIp, Port(0))

  if bindAddress.isAnyLocal():
    let ip = getRouteIpv4()
    if ip.isErr():
      # No route was found, log error and continue without IP.
      error "No routable IP address found, check your network connection",
        error = ip.error
      return (none(IpAddress), NoRoutingInfo)
|
||||
elif ip.get().isGlobalUnicast():
|
||||
return (some(ip.get()), PrefSrcIsPublic)
|
||||
else:
|
||||
return (none(IpAddress), PrefSrcIsPrivate)
|
||||
elif bindAddress.isGlobalUnicast():
|
||||
return (some(bindIp), BindAddressIsPublic)
|
||||
else:
|
||||
return (none(IpAddress), BindAddressIsPrivate)
|
||||
|
||||
# Try to detect a public IP assigned to this host, before trying NAT traversal.
|
||||
proc getPublicRoutePrefSrcOrExternalIP*(
|
||||
natStrategy: NatStrategy, bindIp: IpAddress, quiet = true
|
||||
): Option[IpAddress] =
|
||||
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
|
||||
|
||||
case prefSrcStatus
|
||||
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
|
||||
return prefSrcIp
|
||||
of PrefSrcIsPrivate, BindAddressIsPrivate:
|
||||
let extIp = getExternalIP(natStrategy, quiet)
|
||||
if extIp.isSome:
|
||||
return some(extIp.get)
|
||||
|
||||
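How a caller might combine the two lookups above, as a minimal sketch (the 0.0.0.0 bind address is illustrative; `getPublicRoutePrefSrcOrExternalIP` is the proc just defined):

```nim
import std/[options, net]

# Assumes this module (codex/nat.nim) is imported.
let bindIp = parseIpAddress("0.0.0.0")
# Checks the routing table first; falls back to UPnP/NAT-PMP discovery.
let publicIp = getPublicRoutePrefSrcOrExternalIP(NatStrategy.NatAny, bindIp)
if publicIp.isSome:
  echo "advertising external IP ", publicIp.get
else:
  echo "no public IP could be determined"
```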
proc doPortMapping(
strategy: NatStrategy, tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] {.gcsafe.} =
var
extTcpPort: Port
extUdpPort: Port

if strategy == NatStrategy.NatUpnp:
for t in [(tcpPort, UPNPProtocol.TCP), (udpPort, UPNPProtocol.UDP)]:
let
(port, protocol) = t
pmres = upnp.addPortMapping(
externalPort = $port,
protocol = protocol,
internalHost = upnp.lanAddr,
internalPort = $port,
desc = description,
leaseDuration = 0,
)
if pmres.isErr:
error "UPnP port mapping", msg = pmres.error, port
return
else:
# let's check it
let cres =
upnp.getSpecificPortMapping(externalPort = $port, protocol = protocol)
if cres.isErr:
warn "UPnP port mapping check failed. Assuming the check itself is broken and the port mapping was done.",
msg = cres.error

info "UPnP: added port mapping",
externalPort = port, internalPort = port, protocol = protocol
case protocol
of UPNPProtocol.TCP:
extTcpPort = port
of UPNPProtocol.UDP:
extUdpPort = port
elif strategy == NatStrategy.NatPmp:
for t in [(tcpPort, NatPmpProtocol.TCP), (udpPort, NatPmpProtocol.UDP)]:
let
(port, protocol) = t
pmres = npmp.addPortMapping(
eport = port.cushort,
iport = port.cushort,
protocol = protocol,
lifetime = NATPMP_LIFETIME,
)
if pmres.isErr:
error "NAT-PMP port mapping", msg = pmres.error, port
return
else:
let extPort = Port(pmres.value)
info "NAT-PMP: added port mapping",
externalPort = extPort, internalPort = port, protocol = protocol
case protocol
of NatPmpProtocol.TCP:
extTcpPort = extPort
of NatPmpProtocol.UDP:
extUdpPort = extPort
return some((extTcpPort, extUdpPort))

proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
ignoreSignalsInThread()
let
(strategy, tcpPort, udpPort, description) = args
interval = initDuration(seconds = PORT_MAPPING_INTERVAL)
sleepDuration = 1_000 # in ms, also the maximum delay after pressing Ctrl-C

var lastUpdate = now()

# We can't use copies of Miniupnp and NatPmp objects in this thread, because they share
# C pointers with other instances that have already been garbage collected, so
# we use threadvars instead and initialise them again with getExternalIP(),
# even though we don't need the external IP's value.
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
while natClosed.load() == false:
let
# we're being silly here with this channel polling because we can't
# select on Nim channels like on Go ones
currTime = now()
if currTime >= (lastUpdate + interval):
discard doPortMapping(strategy, tcpPort, udpPort, description)
lastUpdate = currTime

sleep(sleepDuration)

proc stopNatThreads() {.noconv.} =
# stop the thread
debug "Stopping NAT port mapping renewal threads"
try:
natClosed.store(true)
joinThreads(natThreads)
except Exception as exc:
warn "Failed to stop NAT port mapping renewal thread", exc = exc.msg

# delete our port mappings

# FIXME: if the initial port mapping failed because it already existed for the
# required external port, we should not delete it. It might have been set up
# by another program.

# In Windows, a new thread is created for the signal handler, so we need to
# initialise our threadvars again.

let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
if strategy == NatStrategy.NatUpnp:
for entry in activeMappings:
for t in [
(entry.externalTcpPort, entry.internalTcpPort, UPNPProtocol.TCP),
(entry.externalUdpPort, entry.internalUdpPort, UPNPProtocol.UDP),
]:
let
(eport, iport, protocol) = t
pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol)
if pmres.isErr:
error "UPnP port mapping deletion", msg = pmres.error
else:
debug "UPnP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
elif strategy == NatStrategy.NatPmp:
for entry in activeMappings:
for t in [
(entry.externalTcpPort, entry.internalTcpPort, NatPmpProtocol.TCP),
(entry.externalUdpPort, entry.internalUdpPort, NatPmpProtocol.UDP),
]:
let
(eport, iport, protocol) = t
pmres = npmp.deletePortMapping(
eport = eport.cushort, iport = iport.cushort, protocol = protocol
)
if pmres.isErr:
error "NAT-PMP port mapping deletion", msg = pmres.error
else:
debug "NAT-PMP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol

proc redirectPorts*(
strategy: NatStrategy, tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] =
result = doPortMapping(strategy, tcpPort, udpPort, description)
if result.isSome:
let (externalTcpPort, externalUdpPort) = result.get()
# needed by NAT-PMP on port mapping deletion
# Port mapping works. Let's launch a thread that repeats it, in case the
# NAT-PMP lease expires or the router is rebooted and forgets all about
# these mappings.
activeMappings.add(
PortMappings(
internalTcpPort: tcpPort,
externalTcpPort: externalTcpPort,
internalUdpPort: udpPort,
externalUdpPort: externalUdpPort,
description: description,
)
)
try:
natThreads.add(Thread[PortMappingArgs]())
natThreads[^1].createThread(
repeatPortMapping, (strategy, externalTcpPort, externalUdpPort, description)
)
# atexit() in disguise
if natThreads.len == 1:
# we should register the thread termination function only once
addQuitProc(stopNatThreads)
except Exception as exc:
warn "Failed to create NAT port mapping renewal thread", exc = exc.msg

proc setupNat*(
natStrategy: NatStrategy, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
## Setup NAT port mapping and get external IP address.
## If any of this fails, we don't return any IP address but do return the
## original ports as best effort.
## TODO: Allow for tcp or udp port mapping to be optional.
if extIp.isNone:
extIp = getExternalIP(natStrategy)
if extIp.isSome:
let ip = extIp.get
let extPorts = (
{.gcsafe.}:
redirectPorts(
strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId
)
)
if extPorts.isSome:
let (extTcpPort, extUdpPort) = extPorts.get()
(ip: some(ip), tcpPort: some(extTcpPort), udpPort: some(extUdpPort))
else:
warn "UPnP/NAT-PMP available but port forwarding failed"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
else:
warn "UPnP/NAT-PMP not available"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))

proc setupAddress*(
natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} =
## Set-up of the external address via any of the ways as configured in
## `NatConfig`. In case all fails an error is logged and the bind ports are
## selected also as external ports, as best effort and in hope that the
## external IP can be figured out by other means at a later stage.
## TODO: Allow for tcp or udp bind ports to be optional.

if natConfig.hasExtIp:
# any required port redirection must be done by hand
return (some(natConfig.extIp), some(tcpPort), some(udpPort))

case natConfig.nat
of NatStrategy.NatAny:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)

case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate, BindAddressIsPrivate:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
of NatStrategy.NatNone:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)

case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate:
error "No public IP address found. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of BindAddressIsPrivate:
error "Bind IP is not a public IP address. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of NatStrategy.NatUpnp, NatStrategy.NatPmp:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)

proc nattedAddress*(
natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port
): tuple[libp2p, discovery: seq[MultiAddress]] =
## Takes a NAT configuration, sequence of multiaddresses and UDP port and returns:
## - Modified multiaddresses with NAT-mapped addresses for libp2p
## - Discovery addresses with NAT-mapped UDP ports

var discoveryAddrs = newSeq[MultiAddress](0)
let newAddrs = addrs.mapIt:
block:
# Extract IP address and port from the multiaddress
let (ipPart, port) = getAddressAndPort(it)
if ipPart.isSome and port.isSome:
# Try to setup NAT mapping for the address
let (newIP, tcp, udp) =
setupAddress(natConfig, ipPart.get, port.get, udpPort, "codex")
if newIP.isSome:
# NAT mapping successful - add discovery address with mapped UDP port
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(newIP.get, udp.get))
# Remap original address with NAT IP and TCP port
it.remapAddr(ip = newIP, port = tcp)
else:
# NAT mapping failed - use original address
echo "Failed to get external IP, using original address", it
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(ipPart.get, udpPort))
it
else:
# Invalid multiaddress format - return as is
it
(newAddrs, discoveryAddrs)
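A minimal sketch of driving `nattedAddress` from the config types above (the listen address and ports are illustrative, and `MultiAddress.init` comes from libp2p):

```nim
import pkg/libp2p

let natConfig = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
let listen = MultiAddress.init("/ip4/0.0.0.0/tcp/8070").expect("valid multiaddress")
# Returns remapped libp2p addresses plus discovery addresses with the UDP port.
let (libp2pAddrs, discoveryAddrs) =
  nattedAddress(natConfig, @[listen], udpPort = Port(8090))
echo "libp2p: ", libp2pAddrs
echo "discovery: ", discoveryAddrs
```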
686 codex/node.nim (file diff suppressed because it is too large)
@ -2,9 +2,10 @@ import pkg/stint

type
Periodicity* = object
seconds*: UInt256
Period* = UInt256
Timestamp* = UInt256
seconds*: uint64

Period* = uint64
Timestamp* = uint64

func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
timestamp div periodicity.seconds
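A worked example of `periodOf` with the new `uint64` fields (values illustrative): with 600-second periods, timestamps 0..599 map to period 0 and timestamp 600 starts period 1, since `div` is integer division.

```nim
let periodicity = Periodicity(seconds: 600'u64) # 10-minute periods
doAssert periodOf(periodicity, 0'u64) == 0'u64
doAssert periodOf(periodicity, 599'u64) == 0'u64
doAssert periodOf(periodicity, 600'u64) == 1'u64 # 600 div 600 == 1
```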
@ -14,20 +14,17 @@ export purchase

type
Purchasing* = ref object
market: Market
market*: Market
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256

PurchaseTimeout* = Timeout

const DefaultProofProbability = 100.u256

proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
Purchasing(
market: market,
clock: clock,
proofProbability: DefaultProofProbability,
)
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)

proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
@ -43,9 +40,9 @@ proc start*(purchasing: Purchasing) {.async.} =
proc stop*(purchasing: Purchasing) {.async.} =
discard

proc populate*(purchasing: Purchasing,
request: StorageRequest
): Future[StorageRequest] {.async.} =
proc populate*(
purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
result.ask.proofProbability = purchasing.proofProbability
@ -55,9 +52,9 @@ proc populate*(purchasing: Purchasing,
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()

proc purchase*(purchasing: Purchasing,
request: StorageRequest
): Future[Purchase] {.async.} =
proc purchase*(
purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
purchase.start()
@ -75,4 +72,3 @@ func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds
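The populate-then-purchase flow above, as a minimal usage sketch (the `market`, `clock` and `request` values are assumed to be constructed elsewhere; this is not code from the diff):

```nim
proc example(market: Market, clock: Clock, request: StorageRequest) {.async.} =
  let purchasing = Purchasing.new(market, clock)
  # purchase() first calls populate(), which fills in the default
  # proofProbability (100), a random nonce and the client address,
  # then starts the purchase state machine.
  let purchase = await purchasing.purchase(request)
  echo "purchase submitted for request ", request.id
```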
@ -25,10 +25,7 @@ export purchaseid
export statemachine

func new*(
_: type Purchase,
requestId: RequestId,
market: Market,
clock: Clock
_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
@ -42,10 +39,7 @@ func new*(
return purchase

func new*(
_: type Purchase,
request: StorageRequest,
market: Market,
clock: Clock
_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
@ -76,4 +70,5 @@ func error*(purchase: Purchase): ?(ref CatchableError) =
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state

purchase.query(description)
@ -1,12 +1,14 @@
import std/hashes
import pkg/nimcrypto
import ../logutils

type PurchaseId* = distinct array[32, byte]

logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog
logutils.formatIt(LogFormat.textLines, PurchaseId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
it.to0xHexLog

proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex
proc toHex*(x: PurchaseId): string =
array[32, byte](x).toHex
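The borrowed operators and the `toHex` helper above behave like this (a zero-filled id, purely illustrative):

```nim
let id = PurchaseId(default(array[32, byte]))
doAssert id == PurchaseId(default(array[32, byte])) # `==` borrowed from the array
doAssert id.toHex.len == 64 # 32 bytes, two hex digits each
```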
@ -14,5 +14,6 @@ type
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest

PurchaseState* = ref object of State
PurchaseError* = object of CodexError
@ -1,25 +1,35 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./error

declareCounter(codex_purchases_cancelled, "codex purchases cancelled")

logScope:
topics = "marketplace purchases cancelled"

type PurchaseCancelled* = ref object of ErrorHandlingState
type PurchaseCancelled* = ref object of PurchaseState

method `$`*(state: PurchaseCancelled): string =
"cancelled"

method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_cancelled.inc()
let purchase = Purchase(machine)

warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
try:
warn "Request cancelled, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)

let error = newException(Timeout, "Purchase cancelled due to timeout")
purchase.future.fail(error)
let error = newException(Timeout, "Purchase cancelled due to timeout")
purchase.future.fail(error)
except CancelledError as e:
trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseCancelled.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
@ -14,10 +14,13 @@ type PurchaseErrored* = ref object of PurchaseState
method `$`*(state: PurchaseErrored): string =
"errored"

method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_error.inc()
let purchase = Purchase(machine)

error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId
error "Purchasing error",
error = state.error.msgDetail, requestId = purchase.requestId

purchase.future.fail(state.error)
@ -1,9 +0,0 @@
import pkg/questionable
import ../statemachine
import ./error

type
ErrorHandlingState* = ref object of PurchaseState

method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
some State(PurchaseErrored(error: error))
@ -1,21 +1,30 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error

declareCounter(codex_purchases_failed, "codex purchases failed")

type
PurchaseFailed* = ref object of PurchaseState
type PurchaseFailed* = ref object of PurchaseState

method `$`*(state: PurchaseFailed): string =
"failed"

method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_failed.inc()
let purchase = Purchase(machine)
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)

try:
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
except CancelledError as e:
trace "PurchaseFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFailed.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

let error = newException(PurchaseError, "Purchase failed")
return some State(PurchaseErrored(error: error))
@ -1,7 +1,9 @@
import pkg/metrics

import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error

declareCounter(codex_purchases_finished, "codex purchases finished")

@ -13,10 +15,19 @@ type PurchaseFinished* = ref object of PurchaseState
method `$`*(state: PurchaseFinished): string =
"finished"

method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_finished.inc()
let purchase = Purchase(machine)
info "Purchase finished, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
try:
info "Purchase finished, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)

purchase.future.complete()
purchase.future.complete()
except CancelledError as e:
trace "PurchaseFinished.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFinished.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
@ -1,18 +1,28 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./submitted
import ./error

declareCounter(codex_purchases_pending, "codex purchases pending")

type PurchasePending* = ref object of ErrorHandlingState
type PurchasePending* = ref object of PurchaseState

method `$`*(state: PurchasePending): string =
"pending"

method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_pending.inc()
let purchase = Purchase(machine)
let request = !purchase.request
await purchase.market.requestStorage(request)
return some State(PurchaseSubmitted())
try:
let request = !purchase.request
await purchase.market.requestStorage(request)
return some State(PurchaseSubmitted())
except CancelledError as e:
trace "PurchasePending.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchasePending.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
@ -1,22 +1,25 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./finished
import ./failed
import ./error

declareCounter(codex_purchases_started, "codex purchases started")

logScope:
topics = "marketplace purchases started"

type PurchaseStarted* = ref object of ErrorHandlingState
type PurchaseStarted* = ref object of PurchaseState

method `$`*(state: PurchaseStarted): string =
"started"

method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_started.inc()
let purchase = Purchase(machine)

@ -27,15 +30,25 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.}
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)

# Ensure that we're past the request end by waiting an additional second
let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
let fut = await one(ended, failed)
await subscription.unsubscribe()
if fut.id == failed.id:
ended.cancel()
return some State(PurchaseFailed())
else:
failed.cancel()
return some State(PurchaseFinished())
var ended: Future[void]
try:
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)

# Ensure that we're past the request end by waiting an additional second
ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
let fut = await one(ended, failed)
await subscription.unsubscribe()
if fut.id == failed.id:
ended.cancelSoon()
return some State(PurchaseFailed())
else:
failed.cancelSoon()
return some State(PurchaseFinished())
except CancelledError as e:
ended.cancelSoon()
failed.cancelSoon()
trace "PurchaseStarted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseStarted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
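The rewritten state races a timer future against a failure subscription and cancels the loser. The same chronos pattern in isolation, as a minimal sketch with illustrative names:

```nim
import pkg/chronos

proc race() {.async.} =
  let fast = sleepAsync(1.seconds)
  let slow = sleepAsync(2.seconds)
  let winner = await one(fast, slow) # completes when the first future does
  if winner.id == fast.id:
    slow.cancelSoon() # schedule cancellation without awaiting it
  else:
    fast.cancelSoon()

waitFor race()
```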
@ -1,36 +1,41 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./started
import ./cancelled
import ./error

logScope:
topics = "marketplace purchases submitted"

declareCounter(codex_purchases_submitted, "codex purchases submitted")

type PurchaseSubmitted* = ref object of ErrorHandlingState
type PurchaseSubmitted* = ref object of PurchaseState

method `$`*(state: PurchaseSubmitted): string =
"submitted"

method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_submitted.inc()
let purchase = Purchase(machine)
let request = !purchase.request
let market = purchase.market
let clock = purchase.clock

info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId
info "Request submitted, waiting for slots to be filled",
requestId = purchase.requestId

proc wait {.async.} =
let done = newFuture[void]()
proc wait() {.async.} =
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.complete()
done.fire()

let subscription = await market.subscribeFulfillment(request.id, callback)
await done
await done.wait()
await subscription.unsubscribe()

proc withTimeout(future: Future[void]) {.async.} =
@ -42,5 +47,10 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.
await wait().withTimeout()
except Timeout:
return some State(PurchaseCancelled())
except CancelledError as e:
trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseSubmitted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

return some State(PurchaseStarted())
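The new `wait` helper swaps a manually completed `Future[void]` for chronos's `AsyncEvent`, which is safe to fire from a plain callback. The fire/wait handshake in isolation (illustrative sketch):

```nim
import pkg/chronos

proc demo() {.async.} =
  let done = newAsyncEvent()
  proc callback() =
    done.fire() # may be called from non-async code
  callback()
  await done.wait() # resumes once fire() has been called

waitFor demo()
```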
@ -1,35 +1,44 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./errorhandling
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error

declareCounter(codex_purchases_unknown, "codex purchases unknown")

type PurchaseUnknown* = ref object of ErrorHandlingState
type PurchaseUnknown* = ref object of PurchaseState

method `$`*(state: PurchaseUnknown): string =
"unknown"

method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} =
codex_purchases_unknown.inc()
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
method run*(
state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
try:
codex_purchases_unknown.inc()
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
purchase.request = some request

purchase.request = some request

case requestState
of RequestState.New:
return some State(PurchaseSubmitted())
of RequestState.Started:
return some State(PurchaseStarted())
of RequestState.Cancelled:
return some State(PurchaseCancelled())
of RequestState.Finished:
return some State(PurchaseFinished())
of RequestState.Failed:
return some State(PurchaseFailed())
case requestState
of RequestState.New:
return some State(PurchaseSubmitted())
of RequestState.Started:
return some State(PurchaseStarted())
of RequestState.Cancelled:
return some State(PurchaseCancelled())
of RequestState.Finished:
return some State(PurchaseFinished())
of RequestState.Failed:
return some State(PurchaseFailed())
except CancelledError as e:
trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseUnknown.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
1374 codex/rest/api.nim (file diff suppressed because it is too large)
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -14,7 +14,7 @@ import pkg/chronos
import pkg/libp2p
import pkg/stew/base10
import pkg/stew/byteutils
import pkg/stew/results
import pkg/results
import pkg/stint

import ../sales
@ -25,9 +25,7 @@ proc encodeString*(cid: type Cid): Result[string, cstring] =
ok($cid)

proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] =
Cid
.init(value)
.mapErr do(e: CidError) -> cstring:
Cid.init(value).mapErr do(e: CidError) -> cstring:
case e
of CidError.Incorrect: "Incorrect Cid".cstring
of CidError.Unsupported: "Unsupported Cid".cstring
@ -44,9 +42,8 @@ proc encodeString*(address: MultiAddress): Result[string, cstring] =
ok($address)

proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
MultiAddress
.init(value)
.mapErr do(e: string) -> cstring: cstring(e)
MultiAddress.init(value).mapErr do(e: string) -> cstring:
cstring(e)

proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] =
Base10.decode(T, value)
@ -55,7 +52,7 @@ proc encodeString*(value: SomeUnsignedInt): Result[string, cstring] =
ok(Base10.toString(value))

proc decodeString*(T: type Duration, value: string): Result[T, cstring] =
let v = ? Base10.decode(uint32, value)
let v = ?Base10.decode(uint32, value)
ok(v.minutes)

proc encodeString*(value: Duration): Result[string, cstring] =
@ -77,19 +74,20 @@ proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] =
except ValueError as e:
err e.msg.cstring

proc decodeString*(_: type array[32, byte],
value: string): Result[array[32, byte], cstring] =
proc decodeString*(
_: type array[32, byte], value: string
): Result[array[32, byte], cstring] =
try:
ok array[32, byte].fromHex(value)
except ValueError as e:
err e.msg.cstring

proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T,
value: string): Result[T, cstring] =
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
_: type T, value: string
): Result[T, cstring] =
array[32, byte].decodeString(value).map(id => T(id))

proc decodeString*(t: typedesc[string],
value: string): Result[string, cstring] =
proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
ok(value)

proc encodeString*(value: string): RestResult[string] =
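Round-trip behaviour of a few of the converters above, as a minimal sketch (values illustrative; `get` and `isErr` come from the Result type):

```nim
doAssert uint8.decodeString("42").get == 42'u8       # via Base10.decode
doAssert Duration.decodeString("5").get == 5.minutes # the number is parsed as minutes
doAssert Cid.decodeString("not a cid").isErr         # CidError mapped to a cstring
```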
@ -13,11 +13,11 @@ export json

type
StorageRequestParams* = object
duration* {.serialize.}: UInt256
duration* {.serialize.}: uint64
proofProbability* {.serialize.}: UInt256
reward* {.serialize.}: UInt256
collateral* {.serialize.}: UInt256
expiry* {.serialize.}: ?UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
expiry* {.serialize.}: uint64
nodes* {.serialize.}: ?uint
tolerance* {.serialize.}: ?uint

@ -28,16 +28,18 @@ type
error* {.serialize.}: ?string

RestAvailability* = object
totalSize* {.serialize.}: UInt256
duration* {.serialize.}: UInt256
minPrice* {.serialize.}: UInt256
maxCollateral* {.serialize.}: UInt256
freeSize* {.serialize.}: ?UInt256
totalSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
minPricePerBytePerSecond* {.serialize.}: UInt256
totalCollateral* {.serialize.}: UInt256
freeSize* {.serialize.}: ?uint64
enabled* {.serialize.}: ?bool
until* {.serialize.}: ?SecondsSince1970

RestSalesAgent* = object
state* {.serialize.}: string
requestId* {.serialize.}: RequestId
slotIndex* {.serialize.}: UInt256
slotIndex* {.serialize.}: uint64
request* {.serialize.}: ?StorageRequest
reservation* {.serialize.}: ?Reservation

@ -74,15 +76,10 @@ type
quotaReservedBytes* {.serialize.}: NBytes

proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList =
RestContentList(
content: content
)
RestContentList(content: content)

proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent =
RestContent(
cid: cid,
manifest: manifest
)
RestContent(cid: cid, manifest: manifest)

proc init*(_: type RestNode, node: dn.Node): RestNode =
RestNode(
@ -90,7 +87,7 @@ proc init*(_: type RestNode, node: dn.Node): RestNode =
peerId: node.record.data.peerId,
record: node.record,
address: node.address,
seen: node.seen
seen: node.seen > 0.5,
)

proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable =
@ -99,28 +96,23 @@ proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRouting
for node in bucket.nodes:
nodes.add(RestNode.init(node))

RestRoutingTable(
localNode: RestNode.init(routingTable.localNode),
nodes: nodes
)
RestRoutingTable(localNode: RestNode.init(routingTable.localNode), nodes: nodes)

proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
RestPeerRecord(
peerId: peerRecord.peerId,
seqNo: peerRecord.seqNo,
addresses: peerRecord.addresses
peerId: peerRecord.peerId, seqNo: peerRecord.seqNo, addresses: peerRecord.addresses
)

proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
RestNodeId(
id: id
)
RestNodeId(id: id)

proc `%`*(obj: StorageRequest | Slot): JsonNode =
let jsonObj = newJObject()
for k, v in obj.fieldPairs: jsonObj[k] = %v
for k, v in obj.fieldPairs:
jsonObj[k] = %v
jsonObj["id"] = %(obj.id)

return jsonObj

proc `%`*(obj: RestNodeId): JsonNode = % $obj.id
proc `%`*(obj: RestNodeId): JsonNode =
% $obj.id
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises

push: {.upraises: [].}
{.push raises: [], gcsafe.}

import pkg/libp2p/crypto/crypto
import pkg/bearssl/rand
@ -30,7 +28,8 @@ proc instance*(t: type Rng): Rng =
const randMax = 18_446_744_073_709_551_615'u64

proc rand*(rng: Rng, max: Natural): int =
if max == 0: return 0
if max == 0:
return 0

while true:
let x = rng[].generate(uint64)
@ -41,8 +40,8 @@ proc sample*[T](rng: Rng, a: openArray[T]): T =
result = a[rng.rand(a.high)]

proc sample*[T](
rng: Rng, sample, exclude: openArray[T]): T
{.raises: [Defect, RngSampleError].} =
rng: Rng, sample, exclude: openArray[T]
): T {.raises: [Defect, RngSampleError].} =
if sample == exclude:
raise newException(RngSampleError, "Sample and exclude arrays are the same!")

@ -53,6 +52,15 @@ proc sample*[T](

break

proc sample*[T](
rng: Rng, sample: openArray[T], limit: int
): seq[T] {.raises: [Defect, RngSampleError].} =
if limit > sample.len:
raise newException(RngSampleError, "Limit cannot be larger than sample!")

for _ in 0 ..< min(sample.len, limit):
result.add(rng.sample(sample, result))

proc shuffle*[T](rng: Rng, a: var openArray[T]) =
for i in countdown(a.high, 1):
let j = rng.rand(i)
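Putting the sampling helpers above together (illustrative values; `Rng.instance()` is the singleton accessor from earlier in this file):

```nim
let rng = Rng.instance()
let xs = @[1, 2, 3, 4, 5]
let pick = rng.sample(xs)           # uniform pick
let other = rng.sample(xs, @[pick]) # pick again, excluding the first result
let three = rng.sample(xs, 3)       # three distinct elements, limit <= xs.len
doAssert pick != other and three.len == 3
```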
328 codex/sales.nim
@ -16,13 +16,13 @@ import ./sales/statemachine
import ./sales/slotqueue
import ./sales/states/preparing
import ./sales/states/unknown
import ./utils/then
import ./utils/trackedfutures
import ./utils/exceptions

## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the
## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
@ -45,13 +45,12 @@ export salescontext
logScope:
topics = "sales marketplace"

type
Sales* = ref object
context*: SalesContext
agents*: seq[SalesAgent]
running: bool
subscriptions: seq[market.Subscription]
trackedFutures: TrackedFutures
type Sales* = ref object
context*: SalesContext
agents*: seq[SalesAgent]
running: bool
subscriptions: seq[market.Subscription]
trackedFutures: TrackedFutures

proc `onStore=`*(sales: Sales, onStore: OnStore) =
sales.context.onStore = some onStore
@ -68,28 +67,31 @@ proc `onProve=`*(sales: Sales, callback: OnProve) =
proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
sales.context.onExpiryUpdate = some callback

proc onStore*(sales: Sales): ?OnStore = sales.context.onStore
proc onStore*(sales: Sales): ?OnStore =
sales.context.onStore

proc onClear*(sales: Sales): ?OnClear = sales.context.onClear
proc onClear*(sales: Sales): ?OnClear =
sales.context.onClear

proc onSale*(sales: Sales): ?OnSale = sales.context.onSale
proc onSale*(sales: Sales): ?OnSale =
sales.context.onSale

proc onProve*(sales: Sales): ?OnProve = sales.context.onProve
proc onProve*(sales: Sales): ?OnProve =
sales.context.onProve

proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
sales.context.onExpiryUpdate

proc new*(_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore): Sales =
proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
Sales.new(market, clock, repo, 0)

proc new*(_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore,
simulateProofFailures: int): Sales =

proc new*(
_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore,
simulateProofFailures: int,
): Sales =
let reservations = Reservations.new(repo)
Sales(
context: SalesContext(
@ -97,117 +99,110 @@ proc new*(_: type Sales,
clock: clock,
reservations: reservations,
slotQueue: SlotQueue.new(),
simulateProofFailures: simulateProofFailures
simulateProofFailures: simulateProofFailures,
),
trackedFutures: TrackedFutures.new(),
subscriptions: @[]
subscriptions: @[],
)

proc remove(sales: Sales, agent: SalesAgent) {.async.} =
proc remove(sales: Sales, agent: SalesAgent) {.async: (raises: []).} =
await agent.stop()

if sales.running:
sales.agents.keepItIf(it != agent)

proc cleanUp(sales: Sales,
agent: SalesAgent,
returnBytes: bool,
reprocessSlot: bool,
processing: Future[void]) {.async.} =

proc cleanUp(
sales: Sales, agent: SalesAgent, reprocessSlot: bool, returnedCollateral: ?UInt256
) {.async: (raises: []).} =
let data = agent.data

logScope:
topics = "sales cleanUp"
requestId = data.requestId
slotIndex = data.slotIndex
reservationId = data.reservation.?id |? ReservationId.default
availabilityId = data.reservation.?availabilityId |? AvailabilityId.default
reservationId = data.reservation .? id |? ReservationId.default
availabilityId = data.reservation .? availabilityId |? AvailabilityId.default

trace "cleaning up sales agent"

# if reservation for the SalesAgent was not created, then it means
# that the cleanUp was called before the sales process really started, so
# there are not really any bytes to be returned
if returnBytes and request =? data.request and reservation =? data.reservation:
if returnErr =? (await sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId,
reservation.id,
request.ask.slotSize
)).errorOption:
error "failure returning bytes",
error = returnErr.msg,
bytes = request.ask.slotSize
if request =? data.request and reservation =? data.reservation:
if returnErr =? (
await noCancel sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId, reservation.id, request.ask.slotSize
)
).errorOption:
error "failure returning bytes",
error = returnErr.msg, bytes = request.ask.slotSize

# delete reservation and return reservation bytes back to the availability
if reservation =? data.reservation and
deleteErr =? (await sales.context.reservations.deleteReservation(
reservation.id,
reservation.availabilityId
)).errorOption:
# delete reservation and return reservation bytes back to the availability
if reservation =? data.reservation and
deleteErr =? (
await noCancel sales.context.reservations.deleteReservation(
reservation.id, reservation.availabilityId, returnedCollateral
)
).errorOption:
error "failure deleting reservation", error = deleteErr.msg

# Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last.
if reprocessSlot and request =? data.request:
if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(data.requestId,
data.slotIndex.truncate(uint16),
data.ask,
request.expiry,
seen = true)
item.seen = true
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
error "failed to readd slot to queue",
errorType = $(type err), error = err.msg
if err =? queue.push(item).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg

await sales.remove(agent)

# signal back to the slot queue to cycle a worker
if not processing.isNil and not processing.finished():
processing.complete()
proc filled(
|
||||
sales: Sales,
|
||||
request: StorageRequest,
|
||||
slotIndex: UInt256,
|
||||
processing: Future[void]) =
|
||||
let fut = sales.remove(agent)
|
||||
sales.trackedFutures.track(fut)
|
||||
|
||||
proc filled(sales: Sales, request: StorageRequest, slotIndex: uint64) =
|
||||
if onSale =? sales.context.onSale:
|
||||
onSale(request, slotIndex)
|
||||
|
||||
# signal back to the slot queue to cycle a worker
|
||||
if not processing.isNil and not processing.finished():
|
||||
processing.complete()
|
||||
|
||||
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
|
||||
debug "Processing slot from queue", requestId = item.requestId,
|
||||
slot = item.slotIndex
|
||||
proc processSlot(
|
||||
sales: Sales, item: SlotQueueItem
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
|
||||
|
||||
let agent = newSalesAgent(
|
||||
sales.context,
|
||||
item.requestId,
|
||||
item.slotIndex.u256,
|
||||
none StorageRequest
|
||||
sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
|
||||
)
|
||||
|
||||
agent.onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} =
|
||||
await sales.cleanUp(agent, returnBytes, reprocessSlot, done)
|
||||
let completed = newAsyncEvent()
|
||||
|
||||
agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) =
|
||||
sales.filled(request, slotIndex, done)
|
||||
agent.onCleanUp = proc(
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async: (raises: []).} =
|
||||
trace "slot cleanup"
|
||||
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
|
||||
completed.fire()
|
||||
|
||||
agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
|
||||
trace "slot filled"
|
||||
sales.filled(request, slotIndex)
|
||||
completed.fire()
|
||||
|
||||
agent.start(SalePreparing())
|
||||
sales.agents.add agent
|
||||
|
||||
trace "waiting for slot processing to complete"
|
||||
await completed.wait()
|
||||
trace "slot processing completed"
|
||||
|
||||
proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
|
||||
let reservations = sales.context.reservations
|
||||
without reservs =? await reservations.all(Reservation):
|
||||
return
|
||||
|
||||
let unused = reservs.filter(r => (
|
||||
let slotId = slotId(r.requestId, r.slotIndex)
|
||||
not activeSlots.any(slot => slot.id == slotId)
|
||||
))
|
||||
let unused = reservs.filter(
|
||||
r => (
|
||||
let slotId = slotId(r.requestId, r.slotIndex)
|
||||
not activeSlots.any(slot => slot.id == slotId)
|
||||
)
|
||||
)
|
||||
|
||||
if unused.len == 0:
|
||||
return
|
||||
@ -215,14 +210,13 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
|
||||
info "Found unused reservations for deletion", unused = unused.len
|
||||
|
||||
for reservation in unused:
|
||||
|
||||
logScope:
|
||||
reservationId = reservation.id
|
||||
availabilityId = reservation.availabilityId
|
||||
|
||||
if err =? (await reservations.deleteReservation(
|
||||
reservation.id, reservation.availabilityId
|
||||
)).errorOption:
|
||||
if err =? (
|
||||
await reservations.deleteReservation(reservation.id, reservation.availabilityId)
|
||||
).errorOption:
|
||||
error "Failed to delete unused reservation", error = err.msg
|
||||
else:
|
||||
trace "Deleted unused reservation"
|
||||
@ -252,17 +246,13 @@ proc load*(sales: Sales) {.async.} =
|
||||
await sales.deleteInactiveReservations(activeSlots)
|
||||
|
||||
for slot in activeSlots:
|
||||
let agent = newSalesAgent(
|
||||
sales.context,
|
||||
slot.request.id,
|
||||
slot.slotIndex,
|
||||
some slot.request)
|
||||
let agent =
|
||||
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
|
||||
|
||||
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
|
||||
# since workers are not being dispatched, this future has not been created
|
||||
# by a worker. Create a dummy one here so we can call sales.cleanUp
|
||||
let done: Future[void] = nil
|
||||
await sales.cleanUp(agent, returnBytes, reprocessSlot, done)
|
||||
agent.onCleanUp = proc(
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async: (raises: []).} =
|
||||
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
|
||||
|
||||
# There is no need to assign agent.onFilled as slots loaded from `mySlots`
|
||||
# are inherently already filled and so assigning agent.onFilled would be
|
||||
@ -271,7 +261,9 @@ proc load*(sales: Sales) {.async.} =
|
||||
agent.start(SaleUnknown())
|
||||
sales.agents.add agent
|
||||
|
||||
proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
|
||||
proc OnAvailabilitySaved(
|
||||
sales: Sales, availability: Availability
|
||||
) {.async: (raises: []).} =
|
||||
## When availabilities are modified or added, the queue should be unpaused if
|
||||
## it was paused and any slots in the queue should have their `seen` flag
|
||||
## cleared.
|
||||
@ -282,11 +274,9 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
|
||||
trace "unpausing queue after new availability added"
|
||||
queue.unpause()
|
||||
|
||||
proc onStorageRequested(sales: Sales,
|
||||
requestId: RequestId,
|
||||
ask: StorageAsk,
|
||||
expiry: UInt256) =
|
||||
|
||||
proc onStorageRequested(
|
||||
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
|
||||
) {.raises: [].} =
|
||||
logScope:
|
||||
topics = "marketplace sales onStorageRequested"
|
||||
requestId
|
||||
@ -297,7 +287,14 @@ proc onStorageRequested(sales: Sales,
|
||||
|
||||
trace "storage requested, adding slots to queue"
|
||||
|
||||
without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
|
||||
let market = sales.context.market
|
||||
|
||||
without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
|
||||
err:
|
||||
error "Request failure, unable to calculate collateral", error = err.msg
|
||||
return
|
||||
|
||||
without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
|
||||
if err of SlotsOutOfRangeError:
|
||||
warn "Too many slots, cannot add to queue"
|
||||
else:
|
||||
@ -314,10 +311,7 @@ proc onStorageRequested(sales: Sales,
|
||||
else:
|
||||
warn "Error adding request to SlotQueue", error = err.msg
|
||||
|
||||
-proc onSlotFreed(sales: Sales,
-                 requestId: RequestId,
-                 slotIndex: UInt256) =
-
+proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
  logScope:
    topics = "marketplace sales onSlotFreed"
    requestId
@@ -325,44 +319,59 @@ proc onSlotFreed(sales: Sales,

  trace "slot freed, adding to queue"

-  proc addSlotToQueue() {.async.} =
+  proc addSlotToQueue() {.async: (raises: []).} =
    let context = sales.context
    let market = context.market
    let queue = context.slotQueue

-    # first attempt to populate request using existing slot metadata in queue
-    without var found =? queue.populateItem(requestId,
-                                            slotIndex.truncate(uint16)):
-      trace "no existing request metadata, getting request info from contract"
-      # if there's no existing slot for that request, retrieve the request
-      # from the contract.
-      without request =? await market.getRequest(requestId):
-        error "unknown request in contract"
-        return
-
-      found = SlotQueueItem.init(request, slotIndex.truncate(uint16))
-
-    if err =? queue.push(found).errorOption:
-      raise err
-
-  addSlotToQueue()
-  .track(sales)
-  .catch(proc(err: ref CatchableError) =
-    if err of SlotQueueItemExistsError:
-      error "Failed to push item to queue becaue it already exists"
-    elif err of QueueNotRunningError:
-      warn "Failed to push item to queue becaue queue is not running"
-    else:
-      warn "Error adding request to SlotQueue", error = err.msg
-  )
+    try:
+      without request =? (await market.getRequest(requestId)), err:
+        error "unknown request in contract", error = err.msgDetail
+        return
+
+      # Take the repairing state into consideration to calculate the collateral.
+      # This is particularly needed because it will affect the priority in the queue
+      # and we want to give the user the ability to tweak the parameters.
+      # Adding the repairing state directly in the queue priority calculation
+      # would not allow this flexibility.
+      without collateral =?
+        market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
+        error "Failed to add freed slot to queue: unable to calculate collateral",
+          error = err.msg
+        return
+
+      if slotIndex > uint16.high.uint64:
+        error "Cannot cast slot index to uint16, value = ", slotIndex
+        return
+
+      without slotQueueItem =?
+        SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
+        err:
+        warn "Too many slots, cannot add to queue", error = err.msgDetail
+        return
+
+      if err =? queue.push(slotQueueItem).errorOption:
+        if err of SlotQueueItemExistsError:
+          error "Failed to push item to queue because it already exists",
+            error = err.msgDetail
+        elif err of QueueNotRunningError:
+          warn "Failed to push item to queue because queue is not running",
+            error = err.msgDetail
+    except CancelledError as e:
+      trace "sales.addSlotToQueue was cancelled"
+
+  # We could get rid of this by adding the storage ask in the SlotFreed event,
+  # so we would not need to call getRequest to get the collateralPerSlot.
+  let fut = addSlotToQueue()
+  sales.trackedFutures.track(fut)

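Fire-and-forget futures are now registered with a tracker instead of the old `.track(sales).catch(...)` chaining, so shutdown can cancel them in one place. A simplified sketch of that tracking pattern; the real `TrackedFutures` lives in the repo's utilities and this stand-in only assumes plain chronos:

```nim
import pkg/chronos

type TrackedFutures = ref object
  futures: seq[FutureBase]

proc track(self: TrackedFutures, fut: FutureBase) =
  # Remember the future so it can be cancelled on shutdown.
  self.futures.add fut

proc cancelTracked(self: TrackedFutures) {.async.} =
  # Cancel and wait for everything still in flight.
  for fut in self.futures:
    if not fut.finished:
      await fut.cancelAndWait()
```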
proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

-  proc onStorageRequested(requestId: RequestId,
-                          ask: StorageAsk,
-                          expiry: UInt256) =
+  proc onStorageRequested(
+      requestId: RequestId, ask: StorageAsk, expiry: uint64
+  ) {.raises: [].} =
    sales.onStorageRequested(requestId, ask, expiry)

  try:
@@ -435,9 +444,13 @@ proc subscribeSlotFilled(sales: Sales) {.async.} =
  let market = context.market
  let queue = context.slotQueue

-  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
+  proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
+    if slotIndex > uint16.high.uint64:
+      error "Cannot cast slot index to uint16, value = ", slotIndex
+      return
+
    trace "slot filled, removing from slot queue", requestId, slotIndex
-    queue.delete(requestId, slotIndex.truncate(uint16))
+    queue.delete(requestId, slotIndex.uint16)

    for agent in sales.agents:
      agent.onSlotFilled(requestId, slotIndex)
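The `uint64` → `uint16` narrowing is now guarded explicitly instead of relying on `truncate`. The guard generalizes to a small helper; a sketch under the same bounds-check logic (the `toUint16` helper is illustrative, not from the repo):

```nim
import pkg/questionable

# Hypothetical helper: refuse to narrow values that do not fit.
func toUint16(value: uint64): ?uint16 =
  if value > uint16.high.uint64:
    return none uint16
  some value.uint16

when isMainModule:
  assert toUint16(42).isSome
  assert toUint16(70_000).isNone  # uint16.high is 65_535
```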
@@ -454,7 +467,7 @@ proc subscribeSlotFreed(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

-  proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) =
+  proc onSlotFreed(requestId: RequestId, slotIndex: uint64) =
    sales.onSlotFreed(requestId, slotIndex)

  try:
@@ -470,9 +483,13 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
  let market = context.market
  let queue = context.slotQueue

-  proc onSlotReservationsFull(requestId: RequestId, slotIndex: UInt256) =
+  proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) =
+    if slotIndex > uint16.high.uint64:
+      error "Cannot cast slot index to uint16, value = ", slotIndex
+      return
+
    trace "reservations for slot full, removing from slot queue", requestId, slotIndex
-    queue.delete(requestId, slotIndex.truncate(uint16))
+    queue.delete(requestId, slotIndex.uint16)

  try:
    let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull)
@@ -482,21 +499,24 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
  except CatchableError as e:
    error "Unable to subscribe to slot filled events", msg = e.msg

-proc startSlotQueue(sales: Sales) {.async.} =
+proc startSlotQueue(sales: Sales) =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

-  slotQueue.onProcessSlot =
-    proc(item: SlotQueueItem, done: Future[void]) {.async.} =
-      trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
-      sales.processSlot(item, done)
+  slotQueue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
+    trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
+    try:
+      await sales.processSlot(item)
+    except CancelledError:
+      discard

-  asyncSpawn slotQueue.start()
+  slotQueue.start()

-  proc onAvailabilityAdded(availability: Availability) {.async.} =
-    await sales.onAvailabilityAdded(availability)
+  proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
+    if availability.enabled:
+      await sales.OnAvailabilitySaved(availability)

-  reservations.onAvailabilityAdded = onAvailabilityAdded
+  reservations.OnAvailabilitySaved = OnAvailabilitySaved

proc subscribe(sales: Sales) {.async.} =
  await sales.subscribeRequested()
@@ -518,7 +538,7 @@ proc unsubscribe(sales: Sales) {.async.} =

proc start*(sales: Sales) {.async.} =
  await sales.load()
-  await sales.startSlotQueue()
+  sales.startSlotQueue()
  await sales.subscribe()
  sales.running = true

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,34 +7,35 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 ##
-##                                                          +--------------------------------------+
-##                                                          |            RESERVATION               |
-## +----------------------------------------+               |--------------------------------------|
-## |            AVAILABILITY                |               | ReservationId  | id             | PK |
-## |----------------------------------------|               |--------------------------------------|
-## | AvailabilityId | id            | PK    |<-||-------o<- | AvailabilityId | availabilityId | FK |
-## |----------------------------------------|               |--------------------------------------|
-## | UInt256        | totalSize     |       |               | UInt256        | size           |    |
-## |----------------------------------------|               |--------------------------------------|
-## | UInt256        | freeSize      |       |               | UInt256        | slotIndex      |    |
-## |----------------------------------------|               +--------------------------------------+
-## | UInt256        | duration      |       |
-## |----------------------------------------|
-## | UInt256        | minPrice      |       |
-## |----------------------------------------|
-## | UInt256        | maxCollateral |       |
-## +----------------------------------------+
+##                                                                     +--------------------------------------+
+##                                                                     |            RESERVATION               |
+## +---------------------------------------------------+              |--------------------------------------|
+## |                 AVAILABILITY                      |              | ReservationId  | id             | PK |
+## |---------------------------------------------------|              |--------------------------------------|
+## | AvailabilityId | id                       | PK    |<-||-------o<-| AvailabilityId | availabilityId | FK |
+## |---------------------------------------------------|              |--------------------------------------|
+## | UInt256        | totalSize                |       |              | UInt256        | size           |    |
+## |---------------------------------------------------|              |--------------------------------------|
+## | UInt256        | freeSize                 |       |              | UInt256        | slotIndex      |    |
+## |---------------------------------------------------|              +--------------------------------------+
+## | UInt256        | duration                 |       |
+## |---------------------------------------------------|
+## | UInt256        | minPricePerBytePerSecond |       |
+## |---------------------------------------------------|
+## | UInt256        | totalCollateral          |       |
+## |---------------------------------------------------|
+## | UInt256        | totalRemainingCollateral |       |
+## +---------------------------------------------------+

-import pkg/upraises
-push: {.upraises: [].}
+{.push raises: [], gcsafe.}

-import std/sequtils
-import std/sugar
-import std/typetraits
+import std/sequtils
+import std/times
 import pkg/chronos
 import pkg/datastore
-import pkg/nimcrypto
 import pkg/questionable
 import pkg/questionable/results
 import pkg/stint
@@ -51,9 +52,10 @@ import ../units
 export requests
 export logutils

-logScope:
-  topics = "sales reservations"
+from nimcrypto import randomBytes
+
+logScope:
+  topics = "marketplace sales reservations"

 type
   AvailabilityId* = distinct array[32, byte]
@@ -62,28 +64,42 @@ type
   SomeStorableId = AvailabilityId | ReservationId
   Availability* = ref object
     id* {.serialize.}: AvailabilityId
-    totalSize* {.serialize.}: UInt256
-    freeSize* {.serialize.}: UInt256
-    duration* {.serialize.}: UInt256
-    minPrice* {.serialize.}: UInt256 # minimal price paid for the whole hosted slot for the request's duration
-    maxCollateral* {.serialize.}: UInt256
+    totalSize* {.serialize.}: uint64
+    freeSize* {.serialize.}: uint64
+    duration* {.serialize.}: uint64
+    minPricePerBytePerSecond* {.serialize.}: UInt256
+    totalCollateral {.serialize.}: UInt256
+    totalRemainingCollateral* {.serialize.}: UInt256
+    # If set to false, the availability will not accept new slots.
+    # If enabled, it will not impact any existing slots that are already being hosted.
+    enabled* {.serialize.}: bool
+    # Specifies the latest timestamp after which the availability will no longer host any slots.
+    # If set to 0, there will be no restrictions.
+    until* {.serialize.}: SecondsSince1970

   Reservation* = ref object
     id* {.serialize.}: ReservationId
     availabilityId* {.serialize.}: AvailabilityId
-    size* {.serialize.}: UInt256
+    size* {.serialize.}: uint64
     requestId* {.serialize.}: RequestId
-    slotIndex* {.serialize.}: UInt256
+    slotIndex* {.serialize.}: uint64
+    validUntil* {.serialize.}: SecondsSince1970

   Reservations* = ref object of RootObj
-    availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability
+    availabilityLock: AsyncLock
+      # Lock for protecting assertions of availability's sizes when searching for matching availability
     repo: RepoStore
-    onAvailabilityAdded: ?OnAvailabilityAdded
-  GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
-  IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
-  OnAvailabilityAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
+    OnAvailabilitySaved: ?OnAvailabilitySaved
+
+  GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
+  IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
+  OnAvailabilitySaved* =
+    proc(availability: Availability): Future[void] {.async: (raises: []).}
   StorableIter* = ref object
     finished*: bool
     next*: GetNext
     dispose*: IterDispose

   ReservationsError* = object of CodexError
   ReserveFailedError* = object of ReservationsError
   ReleaseFailedError* = object of ReservationsError
@@ -93,13 +109,20 @@ type
   SerializationError* = object of ReservationsError
   UpdateFailedError* = object of ReservationsError
   BytesOutOfBoundsError* = object of ReservationsError
+  UntilOutOfBoundsError* = object of ReservationsError

 const
   SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
   ReservationsKey = (SalesKey / "reservations").tryGet

 proc hash*(x: AvailabilityId): Hash {.borrow.}
-proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.}
+proc all*(
+  self: Reservations, T: type SomeStorableObject
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
+
+proc all*(
+  self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).}

 template withLock(lock, body) =
   try:
@@ -109,35 +132,58 @@ template withLock(lock, body) =
     if lock.locked:
       lock.release()

-proc new*(T: type Reservations,
-          repo: RepoStore): Reservations =
-
-  T(availabilityLock: newAsyncLock(),repo: repo)
+proc new*(T: type Reservations, repo: RepoStore): Reservations =
+  T(availabilityLock: newAsyncLock(), repo: repo)

 proc init*(
-    _: type Availability,
-    totalSize: UInt256,
-    freeSize: UInt256,
-    duration: UInt256,
-    minPrice: UInt256,
-    maxCollateral: UInt256): Availability =
-
+    _: type Availability,
+    totalSize: uint64,
+    freeSize: uint64,
+    duration: uint64,
+    minPricePerBytePerSecond: UInt256,
+    totalCollateral: UInt256,
+    enabled: bool,
+    until: SecondsSince1970,
+): Availability =
   var id: array[32, byte]
   doAssert randomBytes(id) == 32
-  Availability(id: AvailabilityId(id), totalSize:totalSize, freeSize: freeSize, duration: duration, minPrice: minPrice, maxCollateral: maxCollateral)
+  Availability(
+    id: AvailabilityId(id),
+    totalSize: totalSize,
+    freeSize: freeSize,
+    duration: duration,
+    minPricePerBytePerSecond: minPricePerBytePerSecond,
+    totalCollateral: totalCollateral,
+    totalRemainingCollateral: totalCollateral,
+    enabled: enabled,
+    until: until,
+  )
+
+func totalCollateral*(self: Availability): UInt256 {.inline.} =
+  return self.totalCollateral
+
+proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} =
+  self.totalCollateral = value
+  self.totalRemainingCollateral = value

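The widened `init` now seeds `totalRemainingCollateral` from `totalCollateral` and carries the new `enabled`/`until` switches. A minimal usage sketch against the types shown above (values are arbitrary; `SecondsSince1970` is the repo's int64-style timestamp):

```nim
import pkg/stint

let availability = Availability.init(
  totalSize = 1.uint64 shl 30,        # offer 1 GiB
  freeSize = 1.uint64 shl 30,         # all of it still free
  duration = 60'u64 * 60 * 24 * 30,   # host for up to 30 days
  minPricePerBytePerSecond = 1.u256,
  totalCollateral = 1000.u256,
  enabled = true,
  until = 0,                          # 0 = no expiry restriction
)
# The remaining collateral starts out equal to the total.
assert availability.totalRemainingCollateral == 1000.u256
```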
 proc init*(
-    _: type Reservation,
-    availabilityId: AvailabilityId,
-    size: UInt256,
-    requestId: RequestId,
-    slotIndex: UInt256
+    _: type Reservation,
+    availabilityId: AvailabilityId,
+    size: uint64,
+    requestId: RequestId,
+    slotIndex: uint64,
+    validUntil: SecondsSince1970,
 ): Reservation =
-
   var id: array[32, byte]
   doAssert randomBytes(id) == 32
-  Reservation(id: ReservationId(id), availabilityId: availabilityId, size: size, requestId: requestId, slotIndex: slotIndex)
+  Reservation(
+    id: ReservationId(id),
+    availabilityId: availabilityId,
+    size: size,
+    requestId: requestId,
+    slotIndex: slotIndex,
+    validUntil: validUntil,
+  )

 func toArray(id: SomeStorableId): array[32, byte] =
   array[32, byte](id)
@@ -146,24 +192,27 @@ proc `==`*(x, y: AvailabilityId): bool {.borrow.}
 proc `==`*(x, y: ReservationId): bool {.borrow.}
 proc `==`*(x, y: Reservation): bool =
   x.id == y.id

 proc `==`*(x, y: Availability): bool =
   x.id == y.id

-proc `$`*(id: SomeStorableId): string = id.toArray.toHex
+proc `$`*(id: SomeStorableId): string =
+  id.toArray.toHex

 proc toErr[E1: ref CatchableError, E2: ReservationsError](
-    e1: E1,
-    _: type E2,
-    msg: string = e1.msg): ref E2 =
-
+    e1: E1, _: type E2, msg: string = e1.msg
+): ref E2 =
   return newException(E2, msg, e1)

-logutils.formatIt(LogFormat.textLines, SomeStorableId): it.short0xHexLog
-logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog
+logutils.formatIt(LogFormat.textLines, SomeStorableId):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.json, SomeStorableId):
+  it.to0xHexLog

-proc `onAvailabilityAdded=`*(self: Reservations,
-                             onAvailabilityAdded: OnAvailabilityAdded) =
-  self.onAvailabilityAdded = some onAvailabilityAdded
+proc `OnAvailabilitySaved=`*(
+    self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
+) =
+  self.OnAvailabilitySaved = some OnAvailabilitySaved

 func key*(id: AvailabilityId): ?!Key =
   ## sales / reservations / <availabilityId>
@@ -176,27 +225,39 @@ func key*(reservationId: ReservationId, availabilityId: AvailabilityId): ?!Key =
 func key*(availability: Availability): ?!Key =
   return availability.id.key

+func maxCollateralPerByte*(availability: Availability): UInt256 =
+  # If freeSize happens to be zero, we convention that the maxCollateralPerByte
+  # should be equal to totalRemainingCollateral.
+  if availability.freeSize == 0.uint64:
+    return availability.totalRemainingCollateral
+
+  return availability.totalRemainingCollateral div availability.freeSize.stuint(256)
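The per-byte cap is simply the remaining collateral spread over the remaining free bytes. A quick worked check of the two branches, with invented numbers:

```nim
import pkg/stint

# 1000 units of remaining collateral spread over 250 free bytes:
doAssert 1000.u256 div 250.u256 == 4.u256

# With freeSize == 0 the branch above returns the whole remaining
# collateral (1000) instead of dividing by zero.
```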

 func key*(reservation: Reservation): ?!Key =
   return key(reservation.id, reservation.availabilityId)

-func available*(self: Reservations): uint = self.repo.available.uint
+func available*(self: Reservations): uint =
+  self.repo.available.uint

 func hasAvailable*(self: Reservations, bytes: uint): bool =
   self.repo.available(bytes.NBytes)

 proc exists*(
-    self: Reservations,
-    key: Key): Future[bool] {.async.} =
-
+    self: Reservations, key: Key
+): Future[bool] {.async: (raises: [CancelledError]).} =
   let exists = await self.repo.metaDs.ds.contains(key)
   return exists

+iterator items(self: StorableIter): auto =
+  while not self.finished:
+    yield self.next()
+
 proc getImpl(
-    self: Reservations,
-    key: Key): Future[?!seq[byte]] {.async.} =
+    self: Reservations, key: Key
+): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
   if not await self.exists(key):
-    let err = newException(NotExistsError, "object with key " & $key & " does not exist")
+    let err =
+      newException(NotExistsError, "object with key " & $key & " does not exist")
     return failure(err)

   without serialized =? await self.repo.metaDs.ds.get(key), error:
@@ -205,10 +266,8 @@ proc getImpl(
   return success serialized

 proc get*(
-    self: Reservations,
-    key: Key,
-    T: type SomeStorableObject): Future[?!T] {.async.} =
-
+    self: Reservations, key: Key, T: type SomeStorableObject
+): Future[?!T] {.async: (raises: [CancelledError]).} =
   without serialized =? await self.getImpl(key), error:
     return failure(error)

@@ -218,29 +277,29 @@ proc get*(
   return success obj

 proc updateImpl(
-    self: Reservations,
-    obj: SomeStorableObject): Future[?!void] {.async.} =
-
+    self: Reservations, obj: SomeStorableObject
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   trace "updating " & $(obj.type), id = obj.id

   without key =? obj.key, error:
     return failure(error)

-  if err =? (await self.repo.metaDs.ds.put(
-    key,
-    @(obj.toJson.toBytes)
-  )).errorOption:
+  if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption:
     return failure(err.toErr(UpdateFailedError))

   return success()

 proc updateAvailability(
-    self: Reservations,
-    obj: Availability): Future[?!void] {.async.} =
-
+    self: Reservations, obj: Availability
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   logScope:
     availabilityId = obj.id

+  if obj.until < 0:
+    let error =
+      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
+    return failure(error)
+
   without key =? obj.key, error:
     return failure(error)

@@ -249,68 +308,70 @@ proc updateAvailability(
       trace "Creating new Availability"
       let res = await self.updateImpl(obj)
-      # inform subscribers that Availability has been added
-      if onAvailabilityAdded =? self.onAvailabilityAdded:
-        # when chronos v4 is implemented, and OnAvailabilityAdded is annotated
-        # with async:(raises:[]), we can remove this try/catch as we know, with
-        # certainty, that nothing will be raised
-        try:
-          await onAvailabilityAdded(obj)
-        except CancelledError as e:
-          raise e
-        except CatchableError as e:
-          # we don't have any insight into types of exceptions that
-          # `onAvailabilityAdded` can raise because it is caller-defined
-          warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
+      if OnAvailabilitySaved =? self.OnAvailabilitySaved:
+        await OnAvailabilitySaved(obj)
       return res
     else:
       return failure(err)

+  if obj.until > 0:
+    without allReservations =? await self.all(Reservation, obj.id), error:
+      error.msg = "Error updating reservation: " & error.msg
+      return failure(error)
+
+    let requestEnds = allReservations.mapIt(it.validUntil)
+
+    if requestEnds.len > 0 and requestEnds.max > obj.until:
+      let error = newException(
+        UntilOutOfBoundsError,
+        "Until parameter must be greater or equal to the longest currently hosted slot",
+      )
+      return failure(error)
+
   # Sizing of the availability changed, we need to adjust the repo reservation accordingly
   if oldAvailability.totalSize != obj.totalSize:
     trace "totalSize changed, updating repo reservation"
     if oldAvailability.totalSize < obj.totalSize: # storage added
-      if reserveErr =? (await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes)).errorOption:
+      if reserveErr =? (
+        await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes)
+      ).errorOption:
         return failure(reserveErr.toErr(ReserveFailedError))

     elif oldAvailability.totalSize > obj.totalSize: # storage removed
-      if reserveErr =? (await self.repo.release((oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes)).errorOption:
+      if reserveErr =? (
+        await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes)
+      ).errorOption:
         return failure(reserveErr.toErr(ReleaseFailedError))

   let res = await self.updateImpl(obj)

-  if oldAvailability.freeSize < obj.freeSize: # availability added
+  if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
+      oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
+      oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
+    # availability updated
     # inform subscribers that Availability has been modified (with increased
     # size)
-    if onAvailabilityAdded =? self.onAvailabilityAdded:
-      # when chronos v4 is implemented, and OnAvailabilityAdded is annotated
-      # with async:(raises:[]), we can remove this try/catch as we know, with
-      # certainty, that nothing will be raised
-      try:
-        await onAvailabilityAdded(obj)
-      except CancelledError as e:
-        raise e
-      except CatchableError as e:
-        # we don't have any insight into types of exceptions that
-        # `onAvailabilityAdded` can raise because it is caller-defined
-        warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
+    if OnAvailabilitySaved =? self.OnAvailabilitySaved:
+      await OnAvailabilitySaved(obj)

   return res

 proc update*(
-    self: Reservations,
-    obj: Reservation): Future[?!void] {.async.} =
+    self: Reservations, obj: Reservation
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   return await self.updateImpl(obj)

 proc update*(
-    self: Reservations,
-    obj: Availability): Future[?!void] {.async.} =
-  withLock(self.availabilityLock):
-    return await self.updateAvailability(obj)
+    self: Reservations, obj: Availability
+): Future[?!void] {.async: (raises: [CancelledError]).} =
+  try:
+    withLock(self.availabilityLock):
+      return await self.updateAvailability(obj)
+  except AsyncLockError as e:
+    error "Lock error when trying to update the availability", err = e.msg
+    return failure(e)
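The `withLock` template above acquires the availability lock and guarantees release; callers now also convert `AsyncLockError` into a failed `?!` result instead of letting it escape. A standalone sketch of the same pattern, mirroring the template from this diff (`doWork` is a stand-in):

```nim
import pkg/chronos
import pkg/questionable/results

template withLock(lock, body) =
  try:
    await lock.acquire()
    body
  finally:
    if lock.locked:
      lock.release()

proc doWork(): Future[?!void] {.async.} =
  return success()

proc guarded(lock: AsyncLock): Future[?!void] {.async.} =
  try:
    withLock(lock):
      return await doWork()
  except AsyncLockError as e:
    # Surface lock trouble as a Result failure instead of an exception.
    return failure(e)
```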

 proc delete(
-    self: Reservations,
-    key: Key): Future[?!void] {.async.} =
-
+    self: Reservations, key: Key
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   trace "deleting object", key

   if not await self.exists(key):
@@ -322,28 +383,27 @@ proc delete(
   return success()

 proc deleteReservation*(
-    self: Reservations,
-    reservationId: ReservationId,
-    availabilityId: AvailabilityId): Future[?!void] {.async.} =
-
+    self: Reservations,
+    reservationId: ReservationId,
+    availabilityId: AvailabilityId,
+    returnedCollateral: ?UInt256 = UInt256.none,
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   logScope:
     reservationId
     availabilityId

   trace "deleting reservation"

   without key =? key(reservationId, availabilityId), error:
     return failure(error)

-  withLock(self.availabilityLock):
-    without reservation =? (await self.get(key, Reservation)), error:
-      if error of NotExistsError:
-        return success()
-      else:
-        return failure(error)
-
-    if reservation.size > 0.u256:
-      trace "returning remaining reservation bytes to availability",
-        size = reservation.size
+  try:
+    withLock(self.availabilityLock):
+      without reservation =? (await self.get(key, Reservation)), error:
+        if error of NotExistsError:
+          return success()
+        else:
+          return failure(error)

       without availabilityKey =? availabilityId.key, error:
         return failure(error)
@@ -351,38 +411,54 @@ proc deleteReservation*(
       without var availability =? await self.get(availabilityKey, Availability), error:
         return failure(error)

-      availability.freeSize += reservation.size
+      if reservation.size > 0.uint64:
+        trace "returning remaining reservation bytes to availability",
+          size = reservation.size
+        availability.freeSize += reservation.size
+
+      if collateral =? returnedCollateral:
+        availability.totalRemainingCollateral += collateral
+
       if updateErr =? (await self.updateAvailability(availability)).errorOption:
         return failure(updateErr)

-  if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
-    return failure(err.toErr(DeleteFailedError))
+      if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
+        return failure(err.toErr(DeleteFailedError))

-  return success()
+      return success()
+  except AsyncLockError as e:
+    error "Lock error when trying to delete the availability", err = e.msg
+    return failure(e)

 # TODO: add support for deleting availabilities
 # To delete, must not have any active sales.

 proc createAvailability*(
-    self: Reservations,
-    size: UInt256,
-    duration: UInt256,
-    minPrice: UInt256,
-    maxCollateral: UInt256): Future[?!Availability] {.async.} =
-
-  trace "creating availability", size, duration, minPrice, maxCollateral
-
+    self: Reservations,
+    size: uint64,
+    duration: uint64,
+    minPricePerBytePerSecond: UInt256,
+    totalCollateral: UInt256,
+    enabled: bool,
+    until: SecondsSince1970,
+): Future[?!Availability] {.async: (raises: [CancelledError]).} =
+  trace "creating availability",
+    size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
+
+  if until < 0:
+    let error =
+      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
+    return failure(error)
+
   let availability = Availability.init(
-    size, size, duration, minPrice, maxCollateral
+    size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
   )
-  let bytes = availability.freeSize.truncate(uint)
+  let bytes = availability.freeSize

   if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
     return failure(reserveErr.toErr(ReserveFailedError))

   if updateErr =? (await self.update(availability)).errorOption:
-
     # rollback the reserve
     trace "rolling back reserve"
     if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
@@ -394,115 +470,130 @@ proc createAvailability*(
   return success(availability)

 method createReservation*(
-    self: Reservations,
-    availabilityId: AvailabilityId,
-    slotSize: UInt256,
-    requestId: RequestId,
-    slotIndex: UInt256
-): Future[?!Reservation] {.async, base.} =
+    self: Reservations,
+    availabilityId: AvailabilityId,
+    slotSize: uint64,
+    requestId: RequestId,
+    slotIndex: uint64,
+    collateralPerByte: UInt256,
+    validUntil: SecondsSince1970,
+): Future[?!Reservation] {.async: (raises: [CancelledError]), base.} =
-  withLock(self.availabilityLock):
-    without availabilityKey =? availabilityId.key, error:
-      return failure(error)
+  try:
+    withLock(self.availabilityLock):
+      without availabilityKey =? availabilityId.key, error:
+        return failure(error)

-    without availability =? await self.get(availabilityKey, Availability), error:
-      return failure(error)
+      without availability =? await self.get(availabilityKey, Availability), error:
+        return failure(error)

-    # Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availiability modifications
-    if availability.freeSize < slotSize:
-      let error = newException(
-        BytesOutOfBoundsError,
-        "trying to reserve an amount of bytes that is greater than the total size of the Availability")
-      return failure(error)
+      # Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availiability modifications
+      if availability.freeSize < slotSize:
+        let error = newException(
+          BytesOutOfBoundsError,
+          "trying to reserve an amount of bytes that is greater than the free size of the Availability",
+        )
+        return failure(error)

-    trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
+      trace "Creating reservation",
+        availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil

-    let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex)
+      let reservation =
+        Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)

-    if createResErr =? (await self.update(reservation)).errorOption:
-      return failure(createResErr)
+      if createResErr =? (await self.update(reservation)).errorOption:
+        return failure(createResErr)

-    # reduce availability freeSize by the slot size, which is now accounted for in
-    # the newly created Reservation
-    availability.freeSize -= slotSize
+      # reduce availability freeSize by the slot size, which is now accounted for in
+      # the newly created Reservation
+      availability.freeSize -= slotSize
+
+      # adjust the remaining totalRemainingCollateral
+      availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte

-    # update availability with reduced size
-    trace "Updating availability with reduced size"
-    if updateErr =? (await self.updateAvailability(availability)).errorOption:
-      trace "Updating availability failed, rolling back reservation creation"
+      # update availability with reduced size
+      trace "Updating availability with reduced size", freeSize = availability.freeSize
+      if updateErr =? (await self.updateAvailability(availability)).errorOption:
+        trace "Updating availability failed, rolling back reservation creation"

-      without key =? reservation.key, keyError:
-        keyError.parent = updateErr
-        return failure(keyError)
+        without key =? reservation.key, keyError:
+          keyError.parent = updateErr
+          return failure(keyError)

-      # rollback the reservation creation
-      if rollbackErr =? (await self.delete(key)).errorOption:
-        rollbackErr.parent = updateErr
-        return failure(rollbackErr)
+        # rollback the reservation creation
+        if rollbackErr =? (await self.delete(key)).errorOption:
+          rollbackErr.parent = updateErr
+          return failure(rollbackErr)

-      return failure(updateErr)
+        return failure(updateErr)

-    trace "Reservation succesfully created"
-    return success(reservation)
+      trace "Reservation succesfully created"
+      return success(reservation)
+  except AsyncLockError as e:
+    error "Lock error when trying to delete the availability", err = e.msg
+    return failure(e)
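Reserving a slot now debits both free space and remaining collateral. A worked sketch of that bookkeeping in isolation, with invented numbers:

```nim
import pkg/stint

var freeSize = 1_000'u64                 # bytes still offered
var totalRemainingCollateral = 500.u256  # collateral still unallocated

let slotSize = 200'u64
let collateralPerByte = 2.u256

# createReservation's accounting, step by step:
freeSize -= slotSize
totalRemainingCollateral -= slotSize.u256 * collateralPerByte

doAssert freeSize == 800'u64
doAssert totalRemainingCollateral == 100.u256  # 500 - 200 * 2
```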

 proc returnBytesToAvailability*(
-    self: Reservations,
-    availabilityId: AvailabilityId,
-    reservationId: ReservationId,
-    bytes: UInt256): Future[?!void] {.async.} =
-
+    self: Reservations,
+    availabilityId: AvailabilityId,
+    reservationId: ReservationId,
+    bytes: uint64,
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   logScope:
     reservationId
     availabilityId

-  withLock(self.availabilityLock):
-    without key =? key(reservationId, availabilityId), error:
-      return failure(error)
+  try:
+    withLock(self.availabilityLock):
+      without key =? key(reservationId, availabilityId), error:
+        return failure(error)

-    without var reservation =? (await self.get(key, Reservation)), error:
-      return failure(error)
+      without var reservation =? (await self.get(key, Reservation)), error:
+        return failure(error)

-    # We are ignoring bytes that are still present in the Reservation because
-    # they will be returned to Availability through `deleteReservation`.
-    let bytesToBeReturned = bytes - reservation.size
+      # We are ignoring bytes that are still present in the Reservation because
+      # they will be returned to Availability through `deleteReservation`.
+      let bytesToBeReturned = bytes - reservation.size

-    if bytesToBeReturned == 0:
-      trace "No bytes are returned", requestSizeBytes = bytes, returningBytes = bytesToBeReturned
-      return success()
+      if bytesToBeReturned == 0:
+        trace "No bytes are returned",
+          requestSizeBytes = bytes, returningBytes = bytesToBeReturned
+        return success()

-    trace "Returning bytes", requestSizeBytes = bytes, returningBytes = bytesToBeReturned
+      trace "Returning bytes",
+        requestSizeBytes = bytes, returningBytes = bytesToBeReturned

-    # First lets see if we can re-reserve the bytes, if the Repo's quota
-    # is depleted then we will fail-fast as there is nothing to be done atm.
-    if reserveErr =? (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
-      return failure(reserveErr.toErr(ReserveFailedError))
+      # First lets see if we can re-reserve the bytes, if the Repo's quota
+      # is depleted then we will fail-fast as there is nothing to be done atm.
+      if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
+        return failure(reserveErr.toErr(ReserveFailedError))

-    without availabilityKey =? availabilityId.key, error:
-      return failure(error)
+      without availabilityKey =? availabilityId.key, error:
+        return failure(error)

-    without var availability =? await self.get(availabilityKey, Availability), error:
-      return failure(error)
+      without var availability =? await self.get(availabilityKey, Availability), error:
+        return failure(error)

-    availability.freeSize += bytesToBeReturned
+      availability.freeSize += bytesToBeReturned

-    # Update availability with returned size
-    if updateErr =? (await self.updateAvailability(availability)).errorOption:
-
-      trace "Rolling back returning bytes"
-      if rollbackErr =? (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
-        rollbackErr.parent = updateErr
-        return failure(rollbackErr)
-
-      return failure(updateErr)
-
-    return success()
+      # Update availability with returned size
+      if updateErr =? (await self.updateAvailability(availability)).errorOption:
+        trace "Rolling back returning bytes"
+        if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
+          rollbackErr.parent = updateErr
+          return failure(rollbackErr)
+
+        return failure(updateErr)
+
+      return success()
+  except AsyncLockError as e:
+    error "Lock error when returning bytes to the availability", err = e.msg
+    return failure(e)

 proc release*(
-    self: Reservations,
-    reservationId: ReservationId,
-    availabilityId: AvailabilityId,
-    bytes: uint): Future[?!void] {.async.} =
-
+    self: Reservations,
+    reservationId: ReservationId,
+    availabilityId: AvailabilityId,
+    bytes: uint,
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   logScope:
     topics = "release"
     bytes
@@ -517,20 +608,20 @@ proc release*(
   without var reservation =? (await self.get(key, Reservation)), error:
     return failure(error)

-  if reservation.size < bytes.u256:
+  if reservation.size < bytes:
     let error = newException(
       BytesOutOfBoundsError,
-      "trying to release an amount of bytes that is greater than the total size of the Reservation")
+      "trying to release an amount of bytes that is greater than the total size of the Reservation",
+    )
     return failure(error)

   if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
     return failure(releaseErr.toErr(ReleaseFailedError))

-  reservation.size -= bytes.u256
+  reservation.size -= bytes

   # persist partially used Reservation with updated size
   if err =? (await self.update(reservation)).errorOption:
     # rollback release if an update error encountered
     trace "rolling back release"
     if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
@@ -540,16 +631,9 @@ proc release*(

   return success()

-iterator items(self: StorableIter): Future[?seq[byte]] =
-  while not self.finished:
-    yield self.next()
-
 proc storables(
-    self: Reservations,
-    T: type SomeStorableObject,
-    queryKey: Key = ReservationsKey
-): Future[?!StorableIter] {.async.} =
-
+    self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
+): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
   var iter = StorableIter()
   let query = Query.init(queryKey)
   when T is Availability:
@@ -567,20 +651,16 @@ proc storables(
     return failure(error)

   # /sales/reservations
-  proc next(): Future[?seq[byte]] {.async.} =
+  proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
     await idleAsync()
     iter.finished = results.finished
-    if not results.finished and
-       res =? (await results.next()) and
-       res.data.len > 0 and
-       key =? res.key and
-       key.namespaces.len == defaultKey.namespaces.len:
-
+    if not results.finished and res =? (await results.next()) and res.data.len > 0 and
+        key =? res.key and key.namespaces.len == defaultKey.namespaces.len:
       return some res.data

     return none seq[byte]

-  proc dispose(): Future[?!void] {.async.} =
+  proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
     return await results.dispose()

   iter.next = next
@@ -588,70 +668,74 @@ proc storables(
   return success iter
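The `items` iterator (moved next to `StorableIter`'s definition in this change) lets a plain `for` loop drain the iterator object. A compact consumption sketch mirroring `allImpl` below; `drain` is invented for illustration and assumes chronos plus questionable:

```nim
import pkg/chronos
import pkg/questionable

proc drain(iter: StorableIter) {.async.} =
  # Each item is a Future resolving to `some bytes` or `none`.
  for fut in iter.items:
    without bytes =? (await fut):
      continue
    echo "got ", bytes.len, " bytes"
  # Iterators must be disposed manually until destructors take over.
  discard await iter.dispose()
```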

 proc allImpl(
-    self: Reservations,
-    T: type SomeStorableObject,
-    queryKey: Key = ReservationsKey
-): Future[?!seq[T]] {.async.} =
-
+    self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
   var ret: seq[T] = @[]

   without storables =? (await self.storables(T, queryKey)), error:
     return failure(error)

   for storable in storables.items:
-    without bytes =? (await storable):
-      continue
+    try:
+      without bytes =? (await storable):
+        continue

-    without obj =? T.fromJson(bytes), error:
-      error "json deserialization error",
-        json = string.fromBytes(bytes),
-        error = error.msg
-      continue
+      without obj =? T.fromJson(bytes), error:
+        error "json deserialization error",
+          json = string.fromBytes(bytes), error = error.msg
+        continue

-    ret.add obj
+      ret.add obj
+    except CancelledError as err:
+      raise err
+    except CatchableError as err:
+      error "Error when retrieving storable", error = err.msg
+      continue

   return success(ret)

 proc all*(
-    self: Reservations,
-    T: type SomeStorableObject
-): Future[?!seq[T]] {.async.} =
+    self: Reservations, T: type SomeStorableObject
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
   return await self.allImpl(T)

 proc all*(
-    self: Reservations,
-    T: type SomeStorableObject,
-    availabilityId: AvailabilityId
-): Future[?!seq[T]] {.async.} =
-  without key =? (ReservationsKey / $availabilityId):
+    self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
+  without key =? key(availabilityId):
     return failure("no key")

   return await self.allImpl(T, key)

 proc findAvailability*(
-    self: Reservations,
-    size, duration, minPrice, collateral: UInt256
-): Future[?Availability] {.async.} =
-
+    self: Reservations,
+    size, duration: uint64,
+    pricePerBytePerSecond, collateralPerByte: UInt256,
+    validUntil: SecondsSince1970,
+): Future[?Availability] {.async: (raises: [CancelledError]).} =
   without storables =? (await self.storables(Availability)), e:
     error "failed to get all storables", error = e.msg
     return none Availability

   for item in storables.items:
-    if bytes =? (await item) and
-       availability =? Availability.fromJson(bytes):
-
-      if size <= availability.freeSize and
-         duration <= availability.duration and
-         collateral <= availability.maxCollateral and
-         minPrice >= availability.minPrice:
-
+    if bytes =? (await item) and availability =? Availability.fromJson(bytes):
+      if availability.enabled and size <= availability.freeSize and
+          duration <= availability.duration and
+          collateralPerByte <= availability.maxCollateralPerByte and
+          pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
+          (availability.until == 0 or availability.until >= validUntil):
         trace "availability matched",
           id = availability.id,
-          size, availFreeSize = availability.freeSize,
-          duration, availDuration = availability.duration,
-          minPrice, availMinPrice = availability.minPrice,
-          collateral, availMaxCollateral = availability.maxCollateral
+          enabled = availability.enabled,
+          size,
+          availFreeSize = availability.freeSize,
+          duration,
+          availDuration = availability.duration,
+          pricePerBytePerSecond,
+          availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
+          collateralPerByte,
+          availMaxCollateralPerByte = availability.maxCollateralPerByte,
+          until = availability.until

         # TODO: As soon as we're on ARC-ORC, we can use destructors
         # to automatically dispose our iterators when they fall out of scope.
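The match now also respects `enabled`, the per-byte collateral cap, and the `until` deadline. The same predicate in isolation, assuming the `Availability` shape from this diff (`matches` is an illustrative name, not a repo proc):

```nim
func matches(a: Availability; size, duration: uint64;
             pricePerBytePerSecond, collateralPerByte: UInt256;
             validUntil: SecondsSince1970): bool =
  a.enabled and
    size <= a.freeSize and
    duration <= a.duration and
    collateralPerByte <= a.maxCollateralPerByte and
    pricePerBytePerSecond >= a.minPricePerBytePerSecond and
    (a.until == 0 or a.until >= validUntil)
```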
@@ -663,7 +747,13 @@ proc findAvailability*(

       trace "availability did not match",
         id = availability.id,
-        size, availFreeSize = availability.freeSize,
-        duration, availDuration = availability.duration,
-        minPrice, availMinPrice = availability.minPrice,
-        collateral, availMaxCollateral = availability.maxCollateral
+        enabled = availability.enabled,
+        size,
+        availFreeSize = availability.freeSize,
+        duration,
+        availDuration = availability.duration,
+        pricePerBytePerSecond,
+        availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
+        collateralPerByte,
+        availMaxCollateralPerByte = availability.maxCollateralPerByte,
+        until = availability.until