Mirror of https://github.com/logos-storage/logos-storage-nim.git, synced 2026-01-03 14:03:10 +00:00.
**Compare commits** (102 commits)
```text
60861d6af8 49e801803f 858101c74c bd49591fff 6765beee2c 45fec4b524
9ac9f6ff3c bd36032251 be759baf4d 6147a751f1 ee47ca8760 f791a960f2
db8f866db4 7aca2f0e61 072bff5cab af55a761e6 e3d8d195c3 d1f2e2399b
8cd10edb69 6cf99e255c 7eb2fb12cc 352273ff81 9ef9258720 7927afe715
01615354af baff902137 4d44154a40 e1c397e112 7b660e3554 c5e424ff1b
36f64ad3e6 235c0ec842 d443df441d e35aec7870 93e4e0f177 6db6bf5f72
b305e00160 3d2d8273e6 e324ac8ca5 f267d99ea8 8af73e02a9 27d807a841
85823342e9 09a8419942 7502b9ad2c 3e17207a0b 1bea94c390 ffbbee01b1
2dd436bfb7 2e1306ac2d 45ade0e3c1 ca869f6dce e43872d0b8 d59c5b023c
28a83db69e 13811825b3 827d9ccccf c689542579 71422f0d3d 25a8077e80
bfbd7264df f7d06cd0e8 748830570a bde98738c2 28e87d06cc f144099377
19a5e05c13 b39d541227 d220e53fe1 2eb83a0ebb 22f5150d1d 0f152d333c
acf81d0571 7c7871ac75 b92f79a654 6f62afef75 4e2a321ad5 1213377ac4
e9c6d19873 5ec3b2b027 0ec52abc98 0032e60398 7deeb7d2b3 60b6996eb0
a0d6fbaf02 709a8648fd 110147d8ef 3a312596bf 9d7b521519 54177e9fbf
75db491d84 f1b84dc6d1 a5db757de3 a0ddcef08d 1cac3e2a11 2538ff8da3
17d3bb55cf 703921df32 2a3a29720f eb09e610d5 7065718e09 fab5e16afd
```
**.github/actions/nimbus-build-system/action.yml** (30 changes)

```diff
@@ -81,35 +81,35 @@ runs:
           mingw-w64-i686-ntldd-git
           mingw-w64-i686-rust
 
-    - name: MSYS2 (Windows All) - Update to gcc 14
-      if: inputs.os == 'windows'
-      shell: ${{ inputs.shell }} {0}
-      run: |
-        pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst
-
     - name: Install gcc 14 on Linux
       # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
-      if : ${{ inputs.os == 'linux' && !inputs.coverage }}
+      if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
       shell: ${{ inputs.shell }} {0}
       run: |
-        # Add GCC-14 to alternatives
-        sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
-        # Set GCC-14 as the default
-        sudo update-alternatives --set gcc /usr/bin/gcc-14
+        # Skip for older Ubuntu versions
+        if [[ $(lsb_release -r | awk -F '[^0-9]+' '{print $2}') -ge 24 ]]; then
+          # Install GCC-14
+          sudo apt-get update -qq
+          sudo apt-get install -yq gcc-14
+          # Add GCC-14 to alternatives
+          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
+          # Set GCC-14 as the default
+          sudo update-alternatives --set gcc /usr/bin/gcc-14
+        fi
 
     - name: Install ccache on Linux/Mac
       if: inputs.os == 'linux' || inputs.os == 'macos'
       uses: hendrikmuhs/ccache-action@v1.2
       with:
         create-symlink: true
-        key: ${{ matrix.os }}-${{ matrix.builder }}-${{ matrix.cpu }}-${{ matrix.tests }}-${{ matrix.nim_version }}
+        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
         evict-old-files: 7d
 
     - name: Install ccache on Windows
       if: inputs.os == 'windows'
       uses: hendrikmuhs/ccache-action@v1.2
       with:
-        key: ${{ matrix.os }}-${{ matrix.builder }}-${{ matrix.cpu }}-${{ matrix.tests }}-${{ matrix.nim_version }}
+        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
         evict-old-files: 7d
 
     - name: Enable ccache on Windows
@@ -202,7 +202,7 @@ runs:
     - name: Restore Nim toolchain binaries from cache
       id: nim-cache
       uses: actions/cache@v4
-      if : ${{ !inputs.coverage }}
+      if : ${{ inputs.coverage != 'true' }}
       with:
         path: NimBinaries
         key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
@@ -218,7 +218,7 @@ runs:
       run: |
         git config --global core.symlinks false
 
-    - name: Build Nim and Codex dependencies
+    - name: Build Nim and Logos Storage dependencies
       shell: ${{ inputs.shell }} {0}
       run: |
         which gcc
```
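A quick way to sanity-check the new Linux gcc-14 path by hand; these are stock Ubuntu commands rather than part of the diff, and assume an Ubuntu host:

```bash
# Major Ubuntu version, as extracted by the gate in the step above
lsb_release -r | awk -F '[^0-9]+' '{print $2}'

# Inspect the alternatives entry created by 'update-alternatives --install'
update-alternatives --display gcc

# Confirm the active compiler after 'update-alternatives --set'
gcc --version
```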
**.github/workflows/Readme.md** (28 changes)

```diff
@@ -3,12 +3,14 @@ Tips for shorter build times
 
 ### Runner availability ###
 
-Currently, the biggest bottleneck when optimizing workflows is the availability
-of Windows and macOS runners. Therefore, anything that reduces the time spent in
-Windows or macOS jobs will have a positive impact on the time waiting for
-runners to become available. The usage limits for Github Actions are [described
-here][limits]. You can see a breakdown of runner usage for your jobs in the
-Github Actions tab ([example][usage]).
+When running on the Github free, pro or team plan, the bottleneck when
+optimizing workflows is the availability of macOS runners. Therefore, anything
+that reduces the time spent in macOS jobs will have a positive impact on the
+time waiting for runners to become available. On the Github enterprise plan,
+this is not the case and you can more freely use parallelization on multiple
+runners. The usage limits for Github Actions are [described here][limits]. You
+can see a breakdown of runner usage for your jobs in the Github Actions tab
+([example][usage]).
 
 ### Windows is slow ###
 
@@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.
 
 Breaking up a long build job into several jobs that you run in parallel can have
 a positive impact on the wall clock time that a workflow runs. For instance, you
-might consider running unit tests and integration tests in parallel. Keep in
-mind however that availability of macOS and Windows runners is the biggest
-bottleneck. If you split a Windows job into two jobs, you now need to wait for
-two Windows runners to become available! Therefore parallelization often only
-makes sense for Linux jobs.
+might consider running unit tests and integration tests in parallel. When
+running on the Github free, pro or team plan, keep in mind that availability of
+macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
+need to wait for two macOS runners to become available.
 
 ### Refactoring ###
 
@@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
 to know whether you introduced a failure on all platforms, or only on a single
 one. You might be tempted to disable fail-fast, but keep in mind that this keeps
 runners busy for longer on a workflow that you know is going to fail anyway.
-Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
+Consequent runs will therefore take longer to start. Fail fast is most likely
+better for overall development speed.
 
-[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
+[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
 [composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
 [reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
 [cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache
```
**.github/workflows/ci-reusable.yml** (21 changes)

```diff
@@ -24,9 +24,9 @@ jobs:
     run:
       shell: ${{ matrix.shell }} {0}
 
-    name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}
+    name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
     runs-on: ${{ matrix.builder }}
-    timeout-minutes: 120
+    timeout-minutes: 90
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
@@ -38,6 +38,7 @@ jobs:
         uses: ./.github/actions/nimbus-build-system
         with:
           os: ${{ matrix.os }}
+          cpu: ${{ matrix.cpu }}
           shell: ${{ matrix.shell }}
           nim_version: ${{ matrix.nim_version }}
           coverage: false
@@ -47,20 +48,22 @@ jobs:
         if: matrix.tests == 'unittest' || matrix.tests == 'all'
         run: make -j${ncpu} test
 
-      # workaround for https://github.com/NomicFoundation/hardhat/issues/3877
       - name: Setup Node.js
+        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
         uses: actions/setup-node@v4
         with:
-          node-version: 18.15
+          node-version: 22
 
-      - name: Start Ethereum node with Codex contracts
+      - name: Start Ethereum node with Logos Storage contracts
         if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
-        working-directory: vendor/codex-contracts-eth
+        working-directory: vendor/logos-storage-contracts-eth
         env:
           MSYS2_PATH_TYPE: inherit
         run: |
-          npm install
+          npm ci
           npm start &
+          # Wait for the contracts to be deployed
+          sleep 5
 
       ## Part 2 Tests ##
       - name: Contract tests
@@ -70,13 +73,15 @@ jobs:
       ## Part 3 Tests ##
       - name: Integration tests
         if: matrix.tests == 'integration' || matrix.tests == 'all'
+        env:
+          CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
         run: make -j${ncpu} testIntegration
 
       - name: Upload integration tests log files
         uses: actions/upload-artifact@v4
         if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
         with:
-          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
+          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
           path: tests/integration/logs/
           retention-days: 1
```
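The new `CODEX_INTEGRATION_TEST_INCLUDES` variable lets each matrix job run only a subset of the integration tests, fed from `matrix.includes`. A hedged sketch of the local equivalent; the value format shown is an assumption, not taken from this diff:

```bash
# Run only a subset of the integration tests; the module name used here is
# a hypothetical example of what matrix.includes might carry.
CODEX_INTEGRATION_TEST_INCLUDES="testmarketplace" make testIntegration
```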
**.github/workflows/ci.yml** (30 changes)

```diff
@@ -9,36 +9,28 @@ on:
 
 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: v2.0.14
+  nim_version: v2.2.4
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
   cancel-in-progress: true
 
 jobs:
 
   matrix:
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
       - name: Compute matrix
         id: matrix
-        uses: fabiocaccamo/create-matrix-action@v5
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
+        run: |
+          echo 'matrix<<EOF' >> $GITHUB_OUTPUT
+          tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
+          echo 'EOF' >> $GITHUB_OUTPUT
 
   build:
     needs: matrix
@@ -61,11 +53,7 @@ jobs:
       suggest: true
 
   coverage:
-    # Force to stick to ubuntu 20.04 for coverage because
-    # lcov was updated to 2.x version in ubuntu-latest
-    # and cause a lot of issues.
-    # See https://github.com/linux-test-project/lcov/issues/238
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
```
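The replacement step uses the standard multiline-output convention for `$GITHUB_OUTPUT`: everything between the `matrix<<EOF` marker and `EOF` becomes `steps.matrix.outputs.matrix`. A minimal sketch of the pattern; the JSON shape is an assumption, since `tools/scripts/ci-job-matrix.sh` itself is not part of this diff:

```bash
# Minimal reproduction of the pattern; the matrix JSON here is a made-up example
echo 'matrix<<EOF' >> "$GITHUB_OUTPUT"
echo '{"include":[{"os":"linux","cpu":"amd64","tests":"unittest"}]}' >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
```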
**.github/workflows/conventional-commits.yml** (19 changes, new file)

```diff
@@ -0,0 +1,19 @@
+name: Conventional Commits Linting
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+  workflow_dispatch:
+  merge_group:
+
+jobs:
+  pr-title:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request'
+    steps:
+      - name: PR Conventional Commit Validation
+        uses: ytanikin/pr-conventional-commits@1.4.1
+        with:
+          task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'
```
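For illustration, hypothetical PR titles checked against the `task_types` list above:

```bash
# Would pass: type prefix is in the task_types list
#   feat: add stable tag for Docker images
#   ci: compute the job matrix with a script
# Would fail: no recognized type prefix
#   Update readme
```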
**.github/workflows/docker-dist-tests.yml** (38 changes, deleted)

```diff
@@ -1,38 +0,0 @@
-name: Docker - Dist-Tests
-
-
-on:
-  push:
-    branches:
-      - master
-    tags:
-      - 'v*.*.*'
-    paths-ignore:
-      - '**/*.md'
-      - '.gitignore'
-      - '.github/**'
-      - '!.github/workflows/docker-dist-tests.yml'
-      - '!.github/workflows/docker-reusable.yml'
-      - 'docker/**'
-      - '!docker/codex.Dockerfile'
-      - '!docker/docker-entrypoint.sh'
-  workflow_dispatch:
-    inputs:
-      run_release_tests:
-        description: Run Release tests
-        required: false
-        type: boolean
-        default: false
-
-
-jobs:
-  build-and-push:
-    name: Build and Push
-    uses: ./.github/workflows/docker-reusable.yml
-    with:
-      nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
-      nat_ip_auto: true
-      tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
-      tag_suffix: dist-tests
-      run_release_tests: ${{ inputs.run_release_tests }}
-    secrets: inherit
```
**.github/workflows/docker-reusable.yml** (78 changes)

```diff
@@ -34,6 +34,11 @@ on:
       description: Set latest tag for Docker images
       required: false
       type: boolean
+    tag_stable:
+      default: false
+      description: Set stable tag for Docker images
+      required: false
+      type: boolean
     tag_sha:
       default: true
       description: Set Git short commit as Docker tag
@@ -59,6 +64,14 @@ on:
       required: false
       type: string
       default: false
+    contract_image:
+      description: Specifies compatible smart contract image
+      required: false
+      type: string
+  outputs:
+    codex_image:
+      description: Logos Storage Docker image tag
+      value: ${{ jobs.publish.outputs.codex_image }}
 
 
 env:
@@ -69,10 +82,12 @@ env:
   NIMFLAGS: ${{ inputs.nimflags }}
   NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
   TAG_LATEST: ${{ inputs.tag_latest }}
+  TAG_STABLE: ${{ inputs.tag_stable }}
   TAG_SHA: ${{ inputs.tag_sha }}
   TAG_SUFFIX: ${{ inputs.tag_suffix }}
+  CONTRACT_IMAGE: ${{ inputs.contract_image }}
   # Tests
-  TESTS_SOURCE: codex-storage/cs-codex-dist-tests
+  TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
   TESTS_BRANCH: master
   CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
   CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@@ -80,8 +95,20 @@ env:
 
 
 jobs:
+  # Compute variables
+  compute:
+    name: Compute build ID
+    runs-on: ubuntu-latest
+    outputs:
+      build_id: ${{ steps.build_id.outputs.build_id }}
+    steps:
+      - name: Generate unique build id
+        id: build_id
+        run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
+
   # Build platform specific image
   build:
+    needs: compute
     strategy:
       fail-fast: true
       matrix:
@@ -108,11 +135,19 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
 
+      - name: Docker - Variables
+        run: |
+          # Create contract label for compatible contract image if specified
+          if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
+            echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
+          fi
+
       - name: Docker - Meta
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.DOCKER_REPO }}
+          labels: ${{ env.CONTRACT_LABEL }}
 
       - name: Docker - Set up Buildx
         uses: docker/setup-buildx-action@v3
@@ -147,7 +182,7 @@ jobs:
       - name: Docker - Upload digest
         uses: actions/upload-artifact@v4
         with:
-          name: digests-${{ matrix.target.arch }}
+          name: digests-${{ needs.compute.outputs.build_id }}-${{ matrix.target.arch }}
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1
@@ -159,35 +194,41 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.meta.outputs.version }}
-    needs: build
+      codex_image: ${{ steps.image_tag.outputs.codex_image }}
+    needs: [build, compute]
     steps:
       - name: Docker - Variables
         run: |
-          # Adjust custom suffix when set and
+          # Adjust custom suffix when set
           if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
-            echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
+            echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
           fi
           # Disable SHA tags on tagged release
           if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
-            echo "TAG_SHA=false" >>$GITHUB_ENV
+            echo "TAG_SHA=false" >> $GITHUB_ENV
           fi
           # Handle latest and latest-custom using raw
           if [[ ${{ env.TAG_SHA }} == "false" ]]; then
-            echo "TAG_LATEST=false" >>$GITHUB_ENV
-            echo "TAG_RAW=true" >>$GITHUB_ENV
+            echo "TAG_LATEST=false" >> $GITHUB_ENV
+            echo "TAG_RAW=true" >> $GITHUB_ENV
             if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
-              echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
+              echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
             else
-              echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
+              echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
             fi
           else
-            echo "TAG_RAW=false" >>$GITHUB_ENV
+            echo "TAG_RAW=false" >> $GITHUB_ENV
+          fi
+
+          # Create contract label for compatible contract image if specified
+          if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
+            echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
           fi
 
       - name: Docker - Download digests
         uses: actions/download-artifact@v4
         with:
-          pattern: digests-*
+          pattern: digests-${{ needs.compute.outputs.build_id }}-*
           merge-multiple: true
           path: /tmp/digests
 
@@ -199,12 +240,14 @@ jobs:
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.DOCKER_REPO }}
+          labels: ${{ env.CONTRACT_LABEL }}
           flavor: |
             latest=${{ env.TAG_LATEST }}
             suffix=${{ env.TAG_SUFFIX }},onlatest=true
           tags: |
             type=semver,pattern={{version}}
             type=raw,enable=${{ env.TAG_RAW }},value=latest
+            type=raw,enable=${{ env.TAG_STABLE }},value=stable
             type=sha,enable=${{ env.TAG_SHA }}
 
       - name: Docker - Login to Docker Hub
@@ -219,9 +262,12 @@ jobs:
           docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
             $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
 
+      - name: Docker - Image tag
+        id: image_tag
+        run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
+
       - name: Docker - Inspect image
-        run: |
-          docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
+        run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
 
 
   # Compute Tests inputs
@@ -270,7 +316,7 @@ jobs:
       max-parallel: 1
       matrix:
         tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
-    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
+    uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
     with:
       source: ${{ needs.compute-tests-inputs.outputs.source }}
       branch: ${{ needs.compute-tests-inputs.outputs.branch }}
@@ -287,7 +333,7 @@ jobs:
     name: Run Release Tests
     needs: [compute-tests-inputs]
     if: ${{ inputs.run_release_tests == 'true' }}
-    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master
+    uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
     with:
       source: ${{ needs.compute-tests-inputs.outputs.source }}
       branch: ${{ needs.compute-tests-inputs.outputs.branch }}
```
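Since `CONTRACT_LABEL` is applied through `docker/metadata-action`, the compatible contract image can later be read back from a published image; the image name below is a placeholder, not taken from this diff:

```bash
# Read the contract-compatibility label off a published image (placeholder tag)
docker pull codexstorage/nim-codex:latest
docker inspect --format '{{ index .Config.Labels "storage.codex.nim-codex.blockchain-image" }}' \
  codexstorage/nim-codex:latest
```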
**.github/workflows/docker.yml** (20 changes)

```diff
@@ -18,11 +18,27 @@ on:
       - '!docker/docker-entrypoint.sh'
   workflow_dispatch:
 
 
 jobs:
+  get-contracts-hash:
+    runs-on: ubuntu-latest
+    outputs:
+      hash: ${{ steps.get-hash.outputs.hash }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+
+      - name: Get submodule short hash
+        id: get-hash
+        run: |
+          hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
+          echo "hash=$hash" >> $GITHUB_OUTPUT
   build-and-push:
     name: Build and Push
     uses: ./.github/workflows/docker-reusable.yml
+    needs: get-contracts-hash
     with:
       tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
-    secrets: inherit
+      tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
+      contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
+    secrets: inherit
```
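The `HEAD:<path>` form used in `get-contracts-hash` resolves the submodule commit recorded in the superproject's tree (the gitlink), so the short hash matches whatever `vendor/logos-storage-contracts-eth` is pinned to. These are stock git commands:

```bash
# Short hash of the commit the superproject pins for the contracts submodule
git rev-parse --short HEAD:vendor/logos-storage-contracts-eth

# Equivalent view: list the gitlink entry recorded in HEAD's tree
git ls-tree HEAD vendor/logos-storage-contracts-eth
```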
**.github/workflows/docs.yml** (2 changes)

```diff
@@ -52,7 +52,7 @@ jobs:
           node-version: 18
 
       - name: Build OpenAPI
-        run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API"
+        run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
 
       - name: Build Postman Collection
         run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false
```
**.github/workflows/nim-matrix.yml** (19 changes)

```diff
@@ -8,22 +8,21 @@ env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned
 
 jobs:
   matrix:
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
       - name: Compute matrix
         id: matrix
-        uses: fabiocaccamo/create-matrix-action@v5
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+        run: |
+          echo 'matrix<<EOF' >> $GITHUB_OUTPUT
+          tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
+          echo 'EOF' >> $GITHUB_OUTPUT
 
   build:
     needs: matrix
```
**.github/workflows/release.yml** (60 changes)

```diff
@@ -4,13 +4,15 @@ on:
   push:
     tags:
       - 'v*.*.*'
+    branches:
+      - master
   workflow_dispatch:
 
 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned
   rust_version: 1.79.0
-  codex_binary_base: codex
+  storage_binary_base: storage
   cirdl_binary_base: cirdl
   build_dir: build
   nim_flags: ''
@@ -28,9 +30,8 @@ jobs:
         uses: fabiocaccamo/create-matrix-action@v5
         with:
           matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
@@ -72,18 +73,18 @@ jobs:
           windows*) os_name="windows" ;;
         esac
         github_ref_name="${GITHUB_REF_NAME/\//-}"
-        codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
+        storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
         cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
         if [[ ${os_name} == "windows" ]]; then
-          codex_binary="${codex_binary}.exe"
+          storage_binary="${storage_binary}.exe"
           cirdl_binary="${cirdl_binary}.exe"
         fi
-        echo "codex_binary=${codex_binary}" >>$GITHUB_ENV
+        echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
         echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
 
     - name: Release - Build
       run: |
-        make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}"
+        make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
         make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
 
     - name: Release - Libraries
@@ -94,11 +95,11 @@ jobs:
         done
       fi
 
-    - name: Release - Upload codex build artifacts
+    - name: Release - Upload Logos Storage build artifacts
       uses: actions/upload-artifact@v4
       with:
-        name: release-${{ env.codex_binary }}
-        path: ${{ env.build_dir }}/${{ env.codex_binary_base }}*
+        name: release-${{ env.storage_binary }}
+        path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
         retention-days: 30
 
     - name: Release - Upload cirdl build artifacts
@@ -138,7 +139,7 @@ jobs:
         }
 
         # Compress and prepare
-        for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do
+        for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
           if [[ "${file}" == *".exe"* ]]; then
 
             # Windows - binary only
@@ -170,6 +171,34 @@ jobs:
         path: /tmp/release/
         retention-days: 30
 
+    - name: Release - Upload to the cloud
+      env:
+        s3_endpoint: ${{ secrets.S3_ENDPOINT }}
+        s3_bucket: ${{ secrets.S3_BUCKET }}
+        AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+        AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+        AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      run: |
+        # Variables
+        branch="${GITHUB_REF_NAME/\//-}"
+        folder="/tmp/release"
+
+        # Tagged releases
+        if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
+          aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
+          echo "${branch}" > "${folder}"/latest
+          aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
+          rm -f "${folder}"/latest
+
+        # master branch
+        elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
+          aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
+
+        # Custom branch
+        else
+          aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
+        fi
+
     - name: Release
       uses: softprops/action-gh-release@v2
       if: startsWith(github.ref, 'refs/tags/')
@@ -177,3 +206,12 @@ jobs:
         files: |
           /tmp/release/*
         make_latest: true
+
+    - name: Generate Python SDK
+      uses: peter-evans/repository-dispatch@v3
+      if: startsWith(github.ref, 'refs/tags/')
+      with:
+        token: ${{ secrets.DISPATCH_PAT }}
+        repository: logos-storage/logos-storage-py-api-client
+        event-type: generate
+        client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'
```
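The upload step lays releases out as `releases/<tag>` plus a `releases/latest` pointer object containing the tag name. A sketch of how a consumer could follow that pointer; bucket and endpoint are placeholders for the workflow's secrets:

```bash
# Read the tag name stored in the 'latest' pointer object (placeholder names)
latest=$(aws s3 cp "s3://BUCKET/releases/latest" - --endpoint-url "https://ENDPOINT")

# Fetch that release's binaries
aws s3 cp --recursive "s3://BUCKET/releases/${latest}" ./release --endpoint-url "https://ENDPOINT"
```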
**.gitmodules** (55 changes)

```diff
@@ -37,22 +37,17 @@
 	path = vendor/nim-nitro
 	url = https://github.com/status-im/nim-nitro.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/questionable"]
 	path = vendor/questionable
 	url = https://github.com/status-im/questionable.git
 	ignore = untracked
-	branch = master
+	branch = main
-[submodule "vendor/upraises"]
-	path = vendor/upraises
-	url = https://github.com/markspanbroek/upraises.git
-	ignore = untracked
-	branch = master
 [submodule "vendor/asynctest"]
 	path = vendor/asynctest
 	url = https://github.com/status-im/asynctest.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-presto"]
 	path = vendor/nim-presto
 	url = https://github.com/status-im/nim-presto.git
@@ -132,7 +127,7 @@
 	path = vendor/nim-websock
 	url = https://github.com/status-im/nim-websock.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-contract-abi"]
 	path = vendor/nim-contract-abi
 	url = https://github.com/status-im/nim-contract-abi
@@ -160,13 +155,13 @@
 	path = vendor/nim-taskpools
 	url = https://github.com/status-im/nim-taskpools.git
 	ignore = untracked
-	branch = master
+	branch = stable
 [submodule "vendor/nim-leopard"]
 	path = vendor/nim-leopard
 	url = https://github.com/status-im/nim-leopard.git
-[submodule "vendor/nim-codex-dht"]
-	path = vendor/nim-codex-dht
-	url = https://github.com/codex-storage/nim-codex-dht.git
+[submodule "vendor/logos-storage-nim-dht"]
+	path = vendor/logos-storage-nim-dht
+	url = https://github.com/logos-storage/logos-storage-nim-dht.git
 	ignore = untracked
 	branch = master
 [submodule "vendor/nim-datastore"]
@@ -178,9 +173,11 @@
 [submodule "vendor/nim-eth"]
 	path = vendor/nim-eth
 	url = https://github.com/status-im/nim-eth
-[submodule "vendor/codex-contracts-eth"]
-	path = vendor/codex-contracts-eth
-	url = https://github.com/status-im/codex-contracts-eth
+[submodule "vendor/logos-storage-contracts-eth"]
+	path = vendor/logos-storage-contracts-eth
+	url = https://github.com/logos-storage/logos-storage-contracts-eth.git
+	ignore = untracked
+	branch = master
 [submodule "vendor/nim-protobuf-serialization"]
 	path = vendor/nim-protobuf-serialization
 	url = https://github.com/status-im/nim-protobuf-serialization
@@ -195,29 +192,41 @@
 	url = https://github.com/zevv/npeg
 [submodule "vendor/nim-poseidon2"]
 	path = vendor/nim-poseidon2
-	url = https://github.com/codex-storage/nim-poseidon2.git
+	url = https://github.com/logos-storage/nim-poseidon2.git
+	ignore = untracked
+	branch = master
 [submodule "vendor/constantine"]
 	path = vendor/constantine
 	url = https://github.com/mratsim/constantine.git
 [submodule "vendor/nim-circom-compat"]
 	path = vendor/nim-circom-compat
-	url = https://github.com/codex-storage/nim-circom-compat.git
+	url = https://github.com/logos-storage/nim-circom-compat.git
 	ignore = untracked
 	branch = master
-[submodule "vendor/codex-storage-proofs-circuits"]
-	path = vendor/codex-storage-proofs-circuits
-	url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
+[submodule "vendor/logos-storage-proofs-circuits"]
+	path = vendor/logos-storage-proofs-circuits
+	url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
 	ignore = untracked
 	branch = master
 [submodule "vendor/nim-serde"]
 	path = vendor/nim-serde
-	url = https://github.com/codex-storage/nim-serde.git
+	url = https://github.com/logos-storage/nim-serde.git
 [submodule "vendor/nim-leveldbstatic"]
 	path = vendor/nim-leveldbstatic
-	url = https://github.com/codex-storage/nim-leveldb.git
+	url = https://github.com/logos-storage/nim-leveldb.git
 [submodule "vendor/nim-zippy"]
 	path = vendor/nim-zippy
 	url = https://github.com/status-im/nim-zippy.git
 [submodule "vendor/nph"]
 	path = vendor/nph
 	url = https://github.com/arnetheduck/nph.git
+[submodule "vendor/nim-quic"]
+	path = vendor/nim-quic
+	url = https://github.com/vacp2p/nim-quic.git
+	ignore = untracked
+	branch = main
+[submodule "vendor/nim-ngtcp2"]
+	path = vendor/nim-ngtcp2
+	url = https://github.com/vacp2p/nim-ngtcp2.git
+	ignore = untracked
+	branch = main
```
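Submodule URL and branch changes like these are not picked up automatically by existing clones; these are standard git commands, not part of the diff:

```bash
# Re-point existing clones at the renamed submodule remotes, then update
git submodule sync --recursive
git submodule update --init --recursive
```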
**Jenkinsfile** (2 changes)

```diff
@@ -25,7 +25,7 @@ pipeline {
     stage('Check') {
       steps {
         script {
-          sh './result/bin/codex --version'
+          sh './result/bin/storage --version'
         }
       }
     }
```
**Makefile** (46 changes)

```diff
@@ -15,8 +15,7 @@
 #
 # If NIM_COMMIT is set to "nimbusbuild", this will use the
 # version pinned by nimbus-build-system.
-#PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
-PINNED_NIM_VERSION := v2.0.14
+PINNED_NIM_VERSION := v2.2.4
 
 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
@@ -94,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
 
 # default target, because it's the first one that doesn't start with '.'
 
-# Builds the codex binary
+# Builds the Logos Storage binary
 all: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
+		$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
 
 # Build tools/cirdl
 cirdl: | deps
@@ -139,12 +138,12 @@ test: | build deps
 # Builds and runs the smart contract tests
 testContracts: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims
+		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
 
 # Builds and runs the integration tests
 testIntegration: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims
+		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
 
 # Builds and runs all tests (except for Taiko L2 tests)
 testAll: | build deps
@@ -179,11 +178,11 @@ coverage:
 	$(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test
 	cd nimcache/release/testCodex && rm -f *.c
 	mkdir -p coverage
-	lcov --capture --directory nimcache/release/testCodex --output-file coverage/coverage.info
+	lcov --capture --keep-going --directory nimcache/release/testCodex --output-file coverage/coverage.info
 	shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim
-	shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
+	shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
 	echo -e $(BUILD_MSG) "coverage/report/index.html"
-	genhtml coverage/coverage.f.info --output-directory coverage/report
+	genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report
 
 show-coverage:
 	if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
@@ -233,6 +232,7 @@ format:
 	$(NPH) *.nim
 	$(NPH) codex/
 	$(NPH) tests/
+	$(NPH) library/
 
 clean-nph:
 	rm -f $(NPH)
@@ -243,4 +243,32 @@ print-nph-path:
 
 clean: | clean-nph
 
+################
+## C Bindings ##
+################
+.PHONY: libstorage
+
+STATIC ?= 0
+
+ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
+  NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
+endif
+
+libstorage:
+	$(MAKE) deps
+	rm -f build/libstorage*
+ifeq ($(STATIC), 1)
+	echo -e $(BUILD_MSG) "build/$@.a" && \
+		$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+else ifeq ($(detected_OS),Windows)
+	echo -e $(BUILD_MSG) "build/$@.dll" && \
+		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+else ifeq ($(detected_OS),macOS)
+	echo -e $(BUILD_MSG) "build/$@.dylib" && \
+		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+else
+	echo -e $(BUILD_MSG) "build/$@.so" && \
+		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
+endif
 endif # "variables.mk" was not included
```
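A few invocation sketches for the new `libstorage` target; the `STORAGE_LIB_PARAMS` value shown is a hypothetical example:

```bash
# Dynamic library (default): produces build/libstorage.so, .dylib or .dll
make libstorage

# Static archive instead: produces build/libstorage.a
make libstorage STATIC=1

# Extra Nim flags can be injected via STORAGE_LIB_PARAMS (hypothetical value)
make libstorage STORAGE_LIB_PARAMS="-d:release"
```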
76
README.md
76
README.md
@ -1,22 +1,22 @@
|
|||||||
# Codex Decentralized Durability Engine
|
# Logos Storage Decentralized Engine
|
||||||
|
|
||||||
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval.
|
> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
|
||||||
|
|
||||||
> WARNING: This project is under active development and is considered pre-alpha.
|
> WARNING: This project is under active development and is considered pre-alpha.
|
||||||
|
|
||||||
[](https://opensource.org/licenses/Apache-2.0)
|
[](https://opensource.org/licenses/Apache-2.0)
|
||||||
[](https://opensource.org/licenses/MIT)
|
[](https://opensource.org/licenses/MIT)
|
||||||
[](#stability)
|
[](#stability)
|
||||||
[](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
|
[](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
|
||||||
[](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
|
[](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
|
||||||
[](https://codecov.io/gh/codex-storage/nim-codex)
|
[](https://codecov.io/gh/logos-storage/logos-storage-nim)
|
||||||
[](https://discord.gg/CaJTh24ddQ)
|
[](https://discord.gg/CaJTh24ddQ)
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
## Build and Run
|
## Build and Run
|
||||||
|
|
||||||
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
|
For detailed instructions on preparing to build logos-storage-nim, see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
|
||||||
|
|
||||||
To build the project, clone it and run:
|
To build the project, clone it and run:
|
||||||
|
|
||||||
@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
|
|||||||
Run the client with:
|
Run the client with:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
build/codex
|
build/storage
|
||||||
```
|
```
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
It is possible to configure a Codex node in several ways:
|
It is possible to configure a Logos Storage node in several ways:
|
||||||
1. CLI options
|
1. CLI options
|
||||||
2. Environment variables
|
2. Environment variables
|
||||||
3. Configuration file
|
3. Configuration file
|
||||||
@ -45,22 +45,72 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
|
|||||||
|
|
||||||
## Guides
|
## Guides
|
||||||
|
|
||||||
To get acquainted with Codex, consider:
|
To get acquainted with Logos Storage, consider:
|
||||||
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
|
* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and
|
||||||
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
|
* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
|
||||||
|
|
||||||
## API
|
## API
|
||||||
|
|
||||||
The client exposes a REST API that can be used to interact with the client. An overview of the API is available at [api.codex.storage](https://api.codex.storage).
|
The client exposes a REST API that can be used to interact with the client. An overview of the API is available at [api.codex.storage](https://api.codex.storage).
|
||||||
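For a quick smoke test, the API can be exercised with any HTTP client. The Nim sketch below is illustrative only; the port and route are assumptions based on common defaults, so consult [api.codex.storage](https://api.codex.storage) for the authoritative endpoints.

```nim
# Hypothetical port and route -- check api.codex.storage for the real API.
import std/httpclient

let client = newHttpClient()
# Query the node's debug/info endpoint on the assumed default API port.
echo client.getContent("http://localhost:8080/api/codex/v1/debug/info")
```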
|
|
||||||
|
## Bindings
|
||||||
|
|
||||||
|
Logos Storage provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
|
||||||
|
Currently, only a Go binding is included.
|
||||||
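For orientation, the sketch below shows what a C-callable entry point looks like on the Nim side. The name and signature are hypothetical; the real exported API lives in the `library` folder.

```nim
# Hypothetical example -- see the `library` folder for the actual exported API.
proc storage_version(): cstring {.exportc, dynlib, cdecl.} =
  ## Exposed to C callers; the returned value is a static Nim string literal.
  cstring("0.1.0")
```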
|
|
||||||
|
### Build the C library
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make libstorage
|
||||||
|
```
|
||||||
|
|
||||||
|
This produces the shared library under `build/`.
|
||||||
|
|
||||||
|
### Run the Go example
|
||||||
|
|
||||||
|
Build the Go example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go build -o storage-go examples/golang/storage.go
|
||||||
|
```
|
||||||
|
|
||||||
|
Export the library path:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LD_LIBRARY_PATH=build
|
||||||
|
```
|
||||||
|
|
||||||
|
Run the example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./storage-go
|
||||||
|
```
|
||||||
|
|
||||||
|
### Static vs Dynamic build
|
||||||
|
|
||||||
|
By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
|
||||||
|
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build dynamic (default)
|
||||||
|
make libstorage
|
||||||
|
|
||||||
|
# Build static
|
||||||
|
make STATIC=1 libstorage
|
||||||
|
```
|
||||||
|
|
||||||
|
### Limitation
|
||||||
|
|
||||||
|
Callbacks must be fast and non-blocking; otherwise, the worker thread will hang and prevent other requests from being processed.
|
||||||
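A common way to honor this constraint is to make the callback do nothing but enqueue the payload and return, leaving the heavy work to a separate task. The sketch below uses chronos (already a project dependency); the callback name and payload type are illustrative, not the actual binding API.

```nim
# Sketch only: the real callback signature comes from the C bindings.
import pkg/chronos

var queue = newAsyncQueue[string]()

proc onEvent(msg: string) =
  # Fast and non-blocking: enqueue and return immediately.
  try:
    queue.putNoWait(msg)
  except AsyncQueueFullError:
    discard # never block inside the callback; drop or count instead

proc worker() {.async.} =
  while true:
    let msg = await queue.get()
    # Slow processing happens here, off the callback path.
    echo "processed: ", msg

asyncSpawn worker()
# The host application then drives the dispatcher, e.g. via waitFor/runForever.
```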
|
|
||||||
## Contributing and development
|
## Contributing and development
|
||||||
|
|
||||||
Feel free to dive in; contributions are welcome! Open an issue or submit PRs.
|
Feel free to dive in; contributions are welcome! Open an issue or submit PRs.
|
||||||
|
|
||||||
### Linting and formatting
|
### Linting and formatting
|
||||||
|
|
||||||
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
|
`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) to format our code, and adhering to its styling is required.
|
||||||
On a fresh setup, run `make build-nph` to get `nph`.
|
On a fresh setup, run `make build-nph` to get `nph`.
|
||||||
To format files, run `make nph/<file/folder you want to format>`.
|
To format files, run `make nph/<file/folder you want to format>`.
|
||||||
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
|
If you want, you can install a Git pre-commit hook using `make install-nph-commit`, which will format modified files prior to committing them.
|
||||||
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e., the `nim.formatOnSave` property), which will format the files using `nph`.
|
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e., the `nim.formatOnSave` property), which will format the files using `nph`.
|
||||||
@ -10,17 +10,17 @@ nim c -r run_benchmarks
|
|||||||
```
|
```
|
||||||
|
|
||||||
By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:
|
By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:
|
||||||
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
|
logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
|
||||||
|
|
||||||
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
|
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
|
||||||
|
|
||||||
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definition.
|
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definition.
|
||||||
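As a rough sketch, tweaking the parameters might look like the following; the field names are inferred from the parameters encoded in the bench folder name above and may not match `create_circuits.nim` exactly, so treat this as illustrative only.

```nim
# Illustrative only: verify field names against create_circuits.nim.
let args = CircuitArgs(
  depth: 32,        # proof tree depth
  maxslots: 256,    # maximum slots per dataset
  cellsize: 2048,   # bytes per cell
  blocksize: 65536, # bytes per block
  nsamples: 9,      # samples per proof
  entropy: 1234567,
  seed: 12345,
  nslots: 11,
  ncells: 512,
  index: 3,
)
```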
|
|
||||||
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
|
The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit include paths, etc. `CircuitEnv` sets all of this up.
|
||||||
|
|
||||||
## Codex Ark Circom CLI
|
## Logos Storage Ark Circom CLI
|
||||||
|
|
||||||
Runs Codex's prover setup with Ark / Circom.
|
Runs Logos Storage's prover setup with Ark / Circom.
|
||||||
|
|
||||||
Compile:
|
Compile:
|
||||||
```sh
|
```sh
|
||||||
|
|||||||
@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
|
|||||||
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
|
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
|
||||||
let codexDir = findCodexProjectDir()
|
let codexDir = findCodexProjectDir()
|
||||||
result.nimCircuitCli =
|
result.nimCircuitCli =
|
||||||
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
|
codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
|
||||||
"proof_input" / "cli"
|
"proof_input" / "cli"
|
||||||
result.circuitDirIncludes =
|
result.circuitDirIncludes =
|
||||||
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
|
codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
|
||||||
result.ptauPath =
|
result.ptauPath =
|
||||||
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
|
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
|
||||||
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
|
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
|
||||||
@ -118,7 +118,7 @@ proc createCircuit*(
|
|||||||
##
|
##
|
||||||
## All needed circuit files will be generated as needed.
|
## All needed circuit files will be generated as needed.
|
||||||
## They will be located in `circBenchDir` which defaults to a folder like:
|
## They will be located in `circBenchDir` which defaults to a folder like:
|
||||||
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
|
## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
|
||||||
## with all the given CircuitArgs.
|
## with all the given CircuitArgs.
|
||||||
##
|
##
|
||||||
let circdir = circBenchDir
|
let circdir = circBenchDir
|
||||||
|
|||||||
@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
|
|||||||
)
|
)
|
||||||
benchRuns[benchmarkName] = (runs.avg(), count)
|
benchRuns[benchmarkName] = (runs.avg(), count)
|
||||||
|
|
||||||
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
|
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
|
||||||
if printRegular:
|
if printRegular:
|
||||||
echo ""
|
echo ""
|
||||||
for k, v in benchRuns:
|
for k, v in benchRuns:
|
||||||
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
|
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
|
||||||
|
|
||||||
if printTsv:
|
if printTsv:
|
||||||
echo ""
|
echo ""
|
||||||
echo "name", "\t", "avgTimeSec", "\t", "count"
|
echo "name", "\t", "avgTimeSec", "\t", "count"
|
||||||
for k, v in benchRuns:
|
for k, v in benchRuns:
|
||||||
echo k, "\t", v.avgTimeSec, "\t", v.count
|
echo k, "\t", v.avgTimeSec, "\t", v.count
|
||||||
|
|
||||||
|
|
||||||
import std/math
|
import std/math
|
||||||
|
|
||||||
func floorLog2*(x: int): int =
|
func floorLog2*(x: int): int =
|
||||||
|
|||||||
85
build.nims
@ -3,7 +3,7 @@ mode = ScriptMode.Verbose
|
|||||||
import std/os except commandLineParams
|
import std/os except commandLineParams
|
||||||
|
|
||||||
### Helper functions
|
### Helper functions
|
||||||
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
|
proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
|
||||||
if not dirExists "build":
|
if not dirExists "build":
|
||||||
mkDir "build"
|
mkDir "build"
|
||||||
|
|
||||||
@ -18,57 +18,82 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
|
|||||||
|
|
||||||
let
|
let
|
||||||
# Place build output in 'build' folder, even if name includes a longer path.
|
# Place build output in 'build' folder, even if name includes a longer path.
|
||||||
outName = os.lastPathPart(name)
|
|
||||||
cmd =
|
cmd =
|
||||||
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
|
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
|
||||||
name & ".nim"
|
srcName & ".nim"
|
||||||
|
|
||||||
exec(cmd)
|
exec(cmd)
|
||||||
|
|
||||||
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
|
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
|
||||||
buildBinary name, srcDir, params
|
if not dirExists "build":
|
||||||
exec "build/" & name
|
mkDir "build"
|
||||||
|
|
||||||
task codex, "build codex binary":
|
if `type` == "dynamic":
|
||||||
|
let lib_name = (
|
||||||
|
when defined(windows): name & ".dll"
|
||||||
|
elif defined(macosx): name & ".dylib"
|
||||||
|
else: name & ".so"
|
||||||
|
)
|
||||||
|
exec "nim c" & " --out:build/" & lib_name &
|
||||||
|
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
|
||||||
|
"--nimMainPrefix:libstorage -d:noSignalHandler " &
|
||||||
|
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
|
||||||
|
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
|
||||||
|
else:
|
||||||
|
exec "nim c" & " --out:build/" & name &
|
||||||
|
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
|
||||||
|
"--nimMainPrefix:libstorage -d:noSignalHandler " &
|
||||||
|
"-d:LeopardExtraCompilerFlags=-fPIC " &
|
||||||
|
"-d:chronicles_runtime_filtering " &
|
||||||
|
"-d:chronicles_log_level=TRACE " &
|
||||||
|
params & " " & srcDir & name & ".nim"
|
||||||
|
|
||||||
|
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
|
||||||
|
buildBinary name, outName, srcDir, params
|
||||||
|
exec "build/" & outName
|
||||||
|
|
||||||
|
task storage, "build logos storage binary":
|
||||||
buildBinary "codex",
|
buildBinary "codex",
|
||||||
|
outname = "storage",
|
||||||
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
|
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
|
||||||
|
|
||||||
task toolsCirdl, "build tools/cirdl binary":
|
task toolsCirdl, "build tools/cirdl binary":
|
||||||
buildBinary "tools/cirdl/cirdl"
|
buildBinary "tools/cirdl/cirdl"
|
||||||
|
|
||||||
task testCodex, "Build & run Codex tests":
|
task testStorage, "Build & run Logos Storage tests":
|
||||||
test "testCodex", params = "-d:codex_enable_proof_failures=true"
|
test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
|
||||||
|
|
||||||
task testContracts, "Build & run Codex Contract tests":
|
task testContracts, "Build & run Logos Storage Contract tests":
|
||||||
test "testContracts"
|
test "testContracts"
|
||||||
|
|
||||||
task testIntegration, "Run integration tests":
|
task testIntegration, "Run integration tests":
|
||||||
buildBinary "codex",
|
buildBinary "codex",
|
||||||
|
outName = "storage",
|
||||||
params =
|
params =
|
||||||
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
|
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
|
||||||
test "testIntegration"
|
test "testIntegration"
|
||||||
# use params to enable logging from the integration test executable
|
# use params to enable logging from the integration test executable
|
||||||
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
|
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
|
||||||
# "-d:chronicles_enabled_topics:integration:TRACE"
|
# "-d:chronicles_enabled_topics:integration:TRACE"
|
||||||
|
|
||||||
task build, "build codex binary":
|
task build, "build Logos Storage binary":
|
||||||
codexTask()
|
storageTask()
|
||||||
|
|
||||||
task test, "Run tests":
|
task test, "Run tests":
|
||||||
testCodexTask()
|
testStorageTask()
|
||||||
|
|
||||||
task testTools, "Run Tools tests":
|
task testTools, "Run Tools tests":
|
||||||
toolsCirdlTask()
|
toolsCirdlTask()
|
||||||
test "testTools"
|
test "testTools"
|
||||||
|
|
||||||
task testAll, "Run all tests (except for Taiko L2 tests)":
|
task testAll, "Run all tests (except for Taiko L2 tests)":
|
||||||
testCodexTask()
|
testStorageTask()
|
||||||
testContractsTask()
|
testContractsTask()
|
||||||
testIntegrationTask()
|
testIntegrationTask()
|
||||||
testToolsTask()
|
testToolsTask()
|
||||||
|
|
||||||
task testTaiko, "Run Taiko L2 tests":
|
task testTaiko, "Run Taiko L2 tests":
|
||||||
codexTask()
|
storageTask()
|
||||||
test "testTaiko"
|
test "testTaiko"
|
||||||
|
|
||||||
import strutils
|
import strutils
|
||||||
@ -101,23 +126,43 @@ task coverage, "generates code coverage report":
|
|||||||
test "coverage",
|
test "coverage",
|
||||||
srcDir = "tests/",
|
srcDir = "tests/",
|
||||||
params =
|
params =
|
||||||
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
|
" --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
|
||||||
exec("rm nimcache/coverage/*.c")
|
exec("rm nimcache/coverage/*.c")
|
||||||
rmDir("coverage")
|
rmDir("coverage")
|
||||||
mkDir("coverage")
|
mkDir("coverage")
|
||||||
echo " ======== Running LCOV ======== "
|
echo " ======== Running LCOV ======== "
|
||||||
exec(
|
exec(
|
||||||
"lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info"
|
"lcov --capture --keep-going --directory nimcache/coverage --output-file coverage/coverage.info"
|
||||||
)
|
)
|
||||||
exec(
|
exec(
|
||||||
"lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " &
|
"lcov --extract coverage/coverage.info --keep-going --output-file coverage/coverage.f.info " &
|
||||||
nimSrcs
|
nimSrcs
|
||||||
)
|
)
|
||||||
echo " ======== Generating HTML coverage report ======== "
|
echo " ======== Generating HTML coverage report ======== "
|
||||||
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
|
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ")
|
||||||
echo " ======== Coverage report Done ======== "
|
echo " ======== Coverage report Done ======== "
|
||||||
|
|
||||||
task showCoverage, "open coverage html":
|
task showCoverage, "open coverage html":
|
||||||
echo " ======== Opening HTML coverage report in browser... ======== "
|
echo " ======== Opening HTML coverage report in browser... ======== "
|
||||||
if findExe("open") != "":
|
if findExe("open") != "":
|
||||||
exec("open coverage/report/index.html")
|
exec("open coverage/report/index.html")
|
||||||
|
|
||||||
|
task libstorageDynamic, "Generate bindings":
|
||||||
|
var params = ""
|
||||||
|
when compiles(commandLineParams):
|
||||||
|
for param in commandLineParams():
|
||||||
|
if param.len > 0 and param.startsWith("-"):
|
||||||
|
params.add " " & param
|
||||||
|
|
||||||
|
let name = "libstorage"
|
||||||
|
buildLibrary name, "library/", params, "dynamic"
|
||||||
|
|
||||||
|
task libstorageStatic, "Generate bindings":
|
||||||
|
var params = ""
|
||||||
|
when compiles(commandLineParams):
|
||||||
|
for param in commandLineParams():
|
||||||
|
if param.len > 0 and param.startsWith("-"):
|
||||||
|
params.add " " & param
|
||||||
|
|
||||||
|
let name = "libstorage"
|
||||||
|
buildLibrary name, "library/", params, "static"
|
||||||
|
|||||||
26
codex.nim
@ -1,4 +1,4 @@
|
|||||||
## Nim-Codex
|
## Logos Storage
|
||||||
## Copyright (c) 2021 Status Research & Development GmbH
|
## Copyright (c) 2021 Status Research & Development GmbH
|
||||||
## Licensed under either of
|
## Licensed under either of
|
||||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
@ -45,7 +45,7 @@ when isMainModule:
|
|||||||
|
|
||||||
let config = CodexConf.load(
|
let config = CodexConf.load(
|
||||||
version = codexFullVersion,
|
version = codexFullVersion,
|
||||||
envVarsPrefix = "codex",
|
envVarsPrefix = "storage",
|
||||||
secondarySources = proc(
|
secondarySources = proc(
|
||||||
config: CodexConf, sources: auto
|
config: CodexConf, sources: auto
|
||||||
) {.gcsafe, raises: [ConfigurationError].} =
|
) {.gcsafe, raises: [ConfigurationError].} =
|
||||||
@ -54,6 +54,16 @@ when isMainModule:
|
|||||||
,
|
,
|
||||||
)
|
)
|
||||||
config.setupLogging()
|
config.setupLogging()
|
||||||
|
|
||||||
|
try:
|
||||||
|
updateLogLevel(config.logLevel)
|
||||||
|
except ValueError as err:
|
||||||
|
try:
|
||||||
|
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
|
||||||
|
except IOError:
|
||||||
|
echo "Invalid value for --log-level. " & err.msg
|
||||||
|
quit QuitFailure
|
||||||
|
|
||||||
config.setupMetrics()
|
config.setupMetrics()
|
||||||
|
|
||||||
if not (checkAndCreateDataDir((config.dataDir).string)):
|
if not (checkAndCreateDataDir((config.dataDir).string)):
|
||||||
@ -89,15 +99,15 @@ when isMainModule:
|
|||||||
try:
|
try:
|
||||||
CodexServer.new(config, privateKey)
|
CodexServer.new(config, privateKey)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
error "Failed to start Codex", msg = exc.msg
|
error "Failed to start Logos Storage", msg = exc.msg
|
||||||
quit QuitFailure
|
quit QuitFailure
|
||||||
|
|
||||||
## Ctrl+C handling
|
## Ctrl+C handling
|
||||||
proc doShutdown() =
|
proc doShutdown() =
|
||||||
shutdown = server.stop()
|
shutdown = server.shutdown()
|
||||||
state = CodexStatus.Stopping
|
state = CodexStatus.Stopping
|
||||||
|
|
||||||
notice "Stopping Codex"
|
notice "Stopping Logos Storage"
|
||||||
|
|
||||||
proc controlCHandler() {.noconv.} =
|
proc controlCHandler() {.noconv.} =
|
||||||
when defined(windows):
|
when defined(windows):
|
||||||
@ -128,7 +138,7 @@ when isMainModule:
|
|||||||
try:
|
try:
|
||||||
waitFor server.start()
|
waitFor server.start()
|
||||||
except CatchableError as error:
|
except CatchableError as error:
|
||||||
error "Codex failed to start", error = error.msg
|
error "Logos Storage failed to start", error = error.msg
|
||||||
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
|
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
|
||||||
# but this would mean we'd have to fix the implementation of all
|
# but this would mean we'd have to fix the implementation of all
|
||||||
# services so they won't crash if we attempt to stop them before they
|
# services so they won't crash if we attempt to stop them before they
|
||||||
@ -149,7 +159,7 @@ when isMainModule:
|
|||||||
# be assigned before state switches to Stopping
|
# be assigned before state switches to Stopping
|
||||||
waitFor shutdown
|
waitFor shutdown
|
||||||
except CatchableError as error:
|
except CatchableError as error:
|
||||||
error "Codex didn't shutdown correctly", error = error.msg
|
error "Logos Storage didn't shutdown correctly", error = error.msg
|
||||||
quit QuitFailure
|
quit QuitFailure
|
||||||
|
|
||||||
notice "Exited codex"
|
notice "Exited Storage"
|
||||||
|
|||||||
@ -1,5 +1,5 @@
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
author = "Codex Team"
|
author = "Logos Storage Team"
|
||||||
description = "p2p data durability engine"
|
description = "p2p data durability engine"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
binDir = "build"
|
binDir = "build"
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
## Nim-Codex
|
## Logos Storage
|
||||||
## Copyright (c) 2022 Status Research & Development GmbH
|
## Copyright (c) 2022 Status Research & Development GmbH
|
||||||
## Licensed under either of
|
## Licensed under either of
|
||||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
@ -7,6 +7,8 @@
|
|||||||
## This file may not be copied, modified, or distributed except according to
|
## This file may not be copied, modified, or distributed except according to
|
||||||
## those terms.
|
## those terms.
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
import pkg/chronos
|
import pkg/chronos
|
||||||
import pkg/libp2p/cid
|
import pkg/libp2p/cid
|
||||||
import pkg/libp2p/multicodec
|
import pkg/libp2p/multicodec
|
||||||
@ -41,106 +43,109 @@ type Advertiser* = ref object of RootObj
|
|||||||
advertiserRunning*: bool # Indicates if discovery is running
|
advertiserRunning*: bool # Indicates if discovery is running
|
||||||
concurrentAdvReqs: int # Concurrent advertise requests
|
concurrentAdvReqs: int # Concurrent advertise requests
|
||||||
|
|
||||||
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
|
advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
|
||||||
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
|
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
|
||||||
trackedFutures*: TrackedFutures # Advertise tasks futures
|
trackedFutures*: TrackedFutures # Advertise tasks futures
|
||||||
|
|
||||||
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
|
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
|
||||||
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
|
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
|
||||||
|
|
||||||
proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
|
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
|
||||||
if cid notin b.advertiseQueue:
|
if cid notin b.advertiseQueue:
|
||||||
await b.advertiseQueue.put(cid)
|
await b.advertiseQueue.put(cid)
|
||||||
|
|
||||||
trace "Advertising", cid
|
trace "Advertising", cid
|
||||||
|
|
||||||
proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
|
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
|
||||||
without isM =? cid.isManifest, err:
|
without isM =? cid.isManifest, err:
|
||||||
warn "Unable to determine if cid is manifest"
|
warn "Unable to determine if cid is manifest"
|
||||||
return
|
return
|
||||||
|
|
||||||
if isM:
|
try:
|
||||||
without blk =? await b.localStore.getBlock(cid), err:
|
if isM:
|
||||||
error "Error retrieving manifest block", cid, err = err.msg
|
without blk =? await b.localStore.getBlock(cid), err:
|
||||||
return
|
error "Error retrieving manifest block", cid, err = err.msg
|
||||||
|
return
|
||||||
|
|
||||||
without manifest =? Manifest.decode(blk), err:
|
without manifest =? Manifest.decode(blk), err:
|
||||||
error "Unable to decode as manifest", err = err.msg
|
error "Unable to decode as manifest", err = err.msg
|
||||||
return
|
return
|
||||||
|
|
||||||
# announce manifest cid and tree cid
|
# announce manifest cid and tree cid
|
||||||
await b.addCidToQueue(cid)
|
await b.addCidToQueue(cid)
|
||||||
await b.addCidToQueue(manifest.treeCid)
|
await b.addCidToQueue(manifest.treeCid)
|
||||||
|
except CancelledError as exc:
|
||||||
|
trace "Cancelled advertise block", cid
|
||||||
|
raise exc
|
||||||
|
except CatchableError as e:
|
||||||
|
error "failed to advertise block", cid, error = e.msgDetail
|
||||||
|
|
||||||
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
|
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
|
||||||
while b.advertiserRunning:
|
try:
|
||||||
try:
|
while b.advertiserRunning:
|
||||||
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
|
if cidsIter =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
|
||||||
trace "Advertiser begins iterating blocks..."
|
trace "Advertiser begins iterating blocks..."
|
||||||
for c in cids:
|
for c in cidsIter:
|
||||||
if cid =? await c:
|
if cid =? await c:
|
||||||
await b.advertiseBlock(cid)
|
await b.advertiseBlock(cid)
|
||||||
trace "Advertiser iterating blocks finished."
|
trace "Advertiser iterating blocks finished."
|
||||||
|
|
||||||
await sleepAsync(b.advertiseLocalStoreLoopSleep)
|
await sleepAsync(b.advertiseLocalStoreLoopSleep)
|
||||||
except CancelledError:
|
except CancelledError:
|
||||||
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
|
warn "Cancelled advertise local store loop"
|
||||||
except CatchableError as e:
|
|
||||||
error "failed to advertise blocks in local store", error = e.msgDetail
|
|
||||||
|
|
||||||
info "Exiting advertise task loop"
|
info "Exiting advertise task loop"
|
||||||
|
|
||||||
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
|
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
|
||||||
while b.advertiserRunning:
|
try:
|
||||||
try:
|
while b.advertiserRunning:
|
||||||
let cid = await b.advertiseQueue.get()
|
let cid = await b.advertiseQueue.get()
|
||||||
|
|
||||||
if cid in b.inFlightAdvReqs:
|
if cid in b.inFlightAdvReqs:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
try:
|
let request = b.discovery.provide(cid)
|
||||||
let request = b.discovery.provide(cid)
|
b.inFlightAdvReqs[cid] = request
|
||||||
|
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
|
||||||
|
|
||||||
b.inFlightAdvReqs[cid] = request
|
defer:
|
||||||
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
|
|
||||||
await request
|
|
||||||
finally:
|
|
||||||
b.inFlightAdvReqs.del(cid)
|
b.inFlightAdvReqs.del(cid)
|
||||||
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
|
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
|
||||||
except CancelledError:
|
|
||||||
trace "Advertise task cancelled"
|
await request
|
||||||
return
|
except CancelledError:
|
||||||
except CatchableError as exc:
|
warn "Cancelled advertise task runner"
|
||||||
warn "Exception in advertise task runner", exc = exc.msg
|
|
||||||
|
|
||||||
info "Exiting advertise task runner"
|
info "Exiting advertise task runner"
|
||||||
|
|
||||||
proc start*(b: Advertiser) {.async.} =
|
proc start*(b: Advertiser) {.async: (raises: []).} =
|
||||||
## Start the advertiser
|
## Start the advertiser
|
||||||
##
|
##
|
||||||
|
|
||||||
trace "Advertiser start"
|
trace "Advertiser start"
|
||||||
|
|
||||||
proc onBlock(cid: Cid) {.async.} =
|
# The advertiser is expected to be started only once.
|
||||||
await b.advertiseBlock(cid)
|
if b.advertiserRunning:
|
||||||
|
raiseAssert "Advertiser can only be started once — this should not happen"
|
||||||
|
|
||||||
|
proc onBlock(cid: Cid) {.async: (raises: []).} =
|
||||||
|
try:
|
||||||
|
await b.advertiseBlock(cid)
|
||||||
|
except CancelledError:
|
||||||
|
trace "Cancelled advertise block", cid
|
||||||
|
|
||||||
doAssert(b.localStore.onBlockStored.isNone())
|
doAssert(b.localStore.onBlockStored.isNone())
|
||||||
b.localStore.onBlockStored = onBlock.some
|
b.localStore.onBlockStored = onBlock.some
|
||||||
|
|
||||||
if b.advertiserRunning:
|
|
||||||
warn "Starting advertiser twice"
|
|
||||||
return
|
|
||||||
|
|
||||||
b.advertiserRunning = true
|
b.advertiserRunning = true
|
||||||
for i in 0 ..< b.concurrentAdvReqs:
|
for i in 0 ..< b.concurrentAdvReqs:
|
||||||
let fut = b.processQueueLoop()
|
let fut = b.processQueueLoop()
|
||||||
b.trackedFutures.track(fut)
|
b.trackedFutures.track(fut)
|
||||||
asyncSpawn fut
|
|
||||||
|
|
||||||
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
|
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
|
||||||
b.trackedFutures.track(b.advertiseLocalStoreLoop)
|
b.trackedFutures.track(b.advertiseLocalStoreLoop)
|
||||||
asyncSpawn b.advertiseLocalStoreLoop
|
|
||||||
|
|
||||||
proc stop*(b: Advertiser) {.async.} =
|
proc stop*(b: Advertiser) {.async: (raises: []).} =
|
||||||
## Stop the advertiser
|
## Stop the advertiser
|
||||||
##
|
##
|
||||||
|
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
## Nim-Codex
|
## Logos Storage
|
||||||
## Copyright (c) 2022 Status Research & Development GmbH
|
## Copyright (c) 2022 Status Research & Development GmbH
|
||||||
## Licensed under either of
|
## Licensed under either of
|
||||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
@ -8,6 +8,7 @@
|
|||||||
## those terms.
|
## those terms.
|
||||||
|
|
||||||
import std/sequtils
|
import std/sequtils
|
||||||
|
import std/algorithm
|
||||||
|
|
||||||
import pkg/chronos
|
import pkg/chronos
|
||||||
import pkg/libp2p/cid
|
import pkg/libp2p/cid
|
||||||
@ -38,6 +39,7 @@ const
|
|||||||
DefaultConcurrentDiscRequests = 10
|
DefaultConcurrentDiscRequests = 10
|
||||||
DefaultDiscoveryTimeout = 1.minutes
|
DefaultDiscoveryTimeout = 1.minutes
|
||||||
DefaultMinPeersPerBlock = 3
|
DefaultMinPeersPerBlock = 3
|
||||||
|
DefaultMaxPeersPerBlock = 8
|
||||||
DefaultDiscoveryLoopSleep = 3.seconds
|
DefaultDiscoveryLoopSleep = 3.seconds
|
||||||
|
|
||||||
type DiscoveryEngine* = ref object of RootObj
|
type DiscoveryEngine* = ref object of RootObj
|
||||||
@ -48,77 +50,90 @@ type DiscoveryEngine* = ref object of RootObj
|
|||||||
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
|
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
|
||||||
discEngineRunning*: bool # Indicates if discovery is running
|
discEngineRunning*: bool # Indicates if discovery is running
|
||||||
concurrentDiscReqs: int # Concurrent discovery requests
|
concurrentDiscReqs: int # Concurrent discovery requests
|
||||||
discoveryLoop*: Future[void] # Discovery loop task handle
|
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
|
||||||
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
|
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
|
||||||
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
|
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
|
||||||
minPeersPerBlock*: int # Max number of peers with block
|
minPeersPerBlock*: int # Min number of peers with block
|
||||||
|
maxPeersPerBlock*: int # Max number of peers with block
|
||||||
discoveryLoopSleep: Duration # Discovery loop sleep
|
discoveryLoopSleep: Duration # Discovery loop sleep
|
||||||
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
|
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
|
||||||
# Inflight discovery requests
|
# Inflight discovery requests
|
||||||
|
|
||||||
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
|
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
|
||||||
while b.discEngineRunning:
|
var haves = b.peers.peersHave(cid)
|
||||||
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
|
let count = haves.len - b.maxPeersPerBlock
|
||||||
try:
|
if count <= 0:
|
||||||
await b.discoveryQueue.put(cid)
|
return
|
||||||
except CancelledError:
|
|
||||||
trace "Discovery loop cancelled"
|
|
||||||
return
|
|
||||||
except CatchableError as exc:
|
|
||||||
warn "Exception in discovery loop", exc = exc.msg
|
|
||||||
|
|
||||||
|
haves.sort(
|
||||||
|
proc(a, b: BlockExcPeerCtx): int =
|
||||||
|
cmp(a.lastExchange, b.lastExchange)
|
||||||
|
)
|
||||||
|
|
||||||
|
let toRemove = haves[0 ..< count]
|
||||||
|
for peer in toRemove:
|
||||||
try:
|
try:
|
||||||
logScope:
|
peer.cleanPresence(BlockAddress.init(cid))
|
||||||
sleep = b.discoveryLoopSleep
|
trace "Removed block presence from peer", cid, peer = peer.id
|
||||||
wanted = b.pendingBlocks.len
|
except CatchableError as exc:
|
||||||
|
error "Failed to clean presence for peer",
|
||||||
|
cid, peer = peer.id, error = exc.msg, name = exc.name
|
||||||
|
|
||||||
|
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
|
||||||
|
try:
|
||||||
|
while b.discEngineRunning:
|
||||||
|
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
|
||||||
|
await b.discoveryQueue.put(cid)
|
||||||
|
|
||||||
await sleepAsync(b.discoveryLoopSleep)
|
await sleepAsync(b.discoveryLoopSleep)
|
||||||
except CancelledError:
|
except CancelledError:
|
||||||
discard # do not propagate as discoveryQueueLoop was asyncSpawned
|
trace "Discovery loop cancelled"
|
||||||
|
|
||||||
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
|
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
|
||||||
## Run discovery tasks
|
## Run discovery tasks
|
||||||
##
|
##
|
||||||
|
|
||||||
while b.discEngineRunning:
|
try:
|
||||||
try:
|
while b.discEngineRunning:
|
||||||
let cid = await b.discoveryQueue.get()
|
let cid = await b.discoveryQueue.get()
|
||||||
|
|
||||||
if cid in b.inFlightDiscReqs:
|
if cid in b.inFlightDiscReqs:
|
||||||
trace "Discovery request already in progress", cid
|
trace "Discovery request already in progress", cid
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
trace "Running discovery task for cid", cid
|
||||||
|
|
||||||
let haves = b.peers.peersHave(cid)
|
let haves = b.peers.peersHave(cid)
|
||||||
|
|
||||||
|
if haves.len > b.maxPeersPerBlock:
|
||||||
|
trace "Cleaning up excess peers",
|
||||||
|
cid, peers = haves.len, max = b.maxPeersPerBlock
|
||||||
|
b.cleanupExcessPeers(cid)
|
||||||
|
continue
|
||||||
|
|
||||||
if haves.len < b.minPeersPerBlock:
|
if haves.len < b.minPeersPerBlock:
|
||||||
try:
|
let request = b.discovery.find(cid)
|
||||||
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
|
b.inFlightDiscReqs[cid] = request
|
||||||
|
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
|
||||||
|
|
||||||
b.inFlightDiscReqs[cid] = request
|
defer:
|
||||||
|
b.inFlightDiscReqs.del(cid)
|
||||||
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
|
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
|
||||||
let peers = await request
|
|
||||||
|
|
||||||
|
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
|
||||||
|
peers =? (await request).catch:
|
||||||
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
|
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
|
||||||
|
|
||||||
for i, f in dialed:
|
for i, f in dialed:
|
||||||
if f.failed:
|
if f.failed:
|
||||||
await b.discovery.removeProvider(peers[i].data.peerId)
|
await b.discovery.removeProvider(peers[i].data.peerId)
|
||||||
finally:
|
except CancelledError:
|
||||||
b.inFlightDiscReqs.del(cid)
|
trace "Discovery task cancelled"
|
||||||
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
|
return
|
||||||
except CancelledError:
|
|
||||||
trace "Discovery task cancelled"
|
|
||||||
return
|
|
||||||
except CatchableError as exc:
|
|
||||||
warn "Exception in discovery task runner", exc = exc.msg
|
|
||||||
except Exception as e:
|
|
||||||
# Raised by b.discovery.removeProvider somehow...
|
|
||||||
# This should not be catchable, and we should never get here. Therefore,
|
|
||||||
# raise a Defect.
|
|
||||||
raiseAssert "Exception when removing provider"
|
|
||||||
|
|
||||||
info "Exiting discovery task runner"
|
info "Exiting discovery task runner"
|
||||||
|
|
||||||
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
|
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
|
||||||
for cid in cids:
|
for cid in cids:
|
||||||
if cid notin b.discoveryQueue:
|
if cid notin b.discoveryQueue:
|
||||||
try:
|
try:
|
||||||
@ -126,11 +141,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
|
|||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
warn "Exception queueing discovery request", exc = exc.msg
|
warn "Exception queueing discovery request", exc = exc.msg
|
||||||
|
|
||||||
proc start*(b: DiscoveryEngine) {.async.} =
|
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
|
||||||
## Start the discengine task
|
## Start the discengine task
|
||||||
##
|
##
|
||||||
|
|
||||||
trace "Discovery engine start"
|
trace "Discovery engine starting"
|
||||||
|
|
||||||
if b.discEngineRunning:
|
if b.discEngineRunning:
|
||||||
warn "Starting discovery engine twice"
|
warn "Starting discovery engine twice"
|
||||||
@ -140,12 +155,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
|
|||||||
for i in 0 ..< b.concurrentDiscReqs:
|
for i in 0 ..< b.concurrentDiscReqs:
|
||||||
let fut = b.discoveryTaskLoop()
|
let fut = b.discoveryTaskLoop()
|
||||||
b.trackedFutures.track(fut)
|
b.trackedFutures.track(fut)
|
||||||
asyncSpawn fut
|
|
||||||
|
|
||||||
b.discoveryLoop = b.discoveryQueueLoop()
|
b.discoveryLoop = b.discoveryQueueLoop()
|
||||||
b.trackedFutures.track(b.discoveryLoop)
|
b.trackedFutures.track(b.discoveryLoop)
|
||||||
|
|
||||||
proc stop*(b: DiscoveryEngine) {.async.} =
|
trace "Discovery engine started"
|
||||||
|
|
||||||
|
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
|
||||||
## Stop the discovery engine
|
## Stop the discovery engine
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -171,6 +187,7 @@ proc new*(
|
|||||||
concurrentDiscReqs = DefaultConcurrentDiscRequests,
|
concurrentDiscReqs = DefaultConcurrentDiscRequests,
|
||||||
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
|
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
|
||||||
minPeersPerBlock = DefaultMinPeersPerBlock,
|
minPeersPerBlock = DefaultMinPeersPerBlock,
|
||||||
|
maxPeersPerBlock = DefaultMaxPeersPerBlock,
|
||||||
): DiscoveryEngine =
|
): DiscoveryEngine =
|
||||||
## Create a discovery engine instance for advertising services
|
## Create a discovery engine instance for advertising services
|
||||||
##
|
##
|
||||||
@ -186,4 +203,5 @@ proc new*(
|
|||||||
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
|
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
|
||||||
discoveryLoopSleep: discoveryLoopSleep,
|
discoveryLoopSleep: discoveryLoopSleep,
|
||||||
minPeersPerBlock: minPeersPerBlock,
|
minPeersPerBlock: minPeersPerBlock,
|
||||||
|
maxPeersPerBlock: maxPeersPerBlock,
|
||||||
)
|
)
|
||||||
|
|||||||
File diff suppressed because it is too large
@ -1,4 +1,4 @@
|
|||||||
## Nim-Codex
|
## Logos Storage
|
||||||
## Copyright (c) 2021 Status Research & Development GmbH
|
## Copyright (c) 2021 Status Research & Development GmbH
|
||||||
## Licensed under either of
|
## Licensed under either of
|
||||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
@ -7,6 +7,8 @@
|
|||||||
## This file may not be copied, modified, or distributed except according to
|
## This file may not be copied, modified, or distributed except according to
|
||||||
## those terms.
|
## those terms.
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/math
|
import std/math
|
||||||
import pkg/nitro
|
import pkg/nitro
|
||||||
import pkg/questionable/results
|
import pkg/questionable/results
|
||||||
@ -15,9 +17,6 @@ import ../peers
|
|||||||
export nitro
|
export nitro
|
||||||
export results
|
export results
|
||||||
|
|
||||||
push:
|
|
||||||
{.upraises: [].}
|
|
||||||
|
|
||||||
const ChainId* = 0.u256 # invalid chain id for now
|
const ChainId* = 0.u256 # invalid chain id for now
|
||||||
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
|
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
|
||||||
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
|
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
## Nim-Codex
|
## Logos Storage
|
||||||
## Copyright (c) 2021 Status Research & Development GmbH
|
## Copyright (c) 2021 Status Research & Development GmbH
|
||||||
## Licensed under either of
|
## Licensed under either of
|
||||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
@ -34,7 +34,7 @@ declareGauge(
|
|||||||
|
|
||||||
const
|
const
|
||||||
DefaultBlockRetries* = 3000
|
DefaultBlockRetries* = 3000
|
||||||
DefaultRetryInterval* = 500.millis
|
DefaultRetryInterval* = 2.seconds
|
||||||
|
|
||||||
type
|
type
|
||||||
RetriesExhaustedError* = object of CatchableError
|
RetriesExhaustedError* = object of CatchableError
|
||||||
@ -42,7 +42,7 @@ type
|
|||||||
|
|
||||||
BlockReq* = object
|
BlockReq* = object
|
||||||
handle*: BlockHandle
|
handle*: BlockHandle
|
||||||
inFlight*: bool
|
requested*: ?PeerId
|
||||||
blockRetries*: int
|
blockRetries*: int
|
||||||
startTime*: int64
|
startTime*: int64
|
||||||
|
|
||||||
@ -50,12 +50,13 @@ type
|
|||||||
blockRetries*: int = DefaultBlockRetries
|
blockRetries*: int = DefaultBlockRetries
|
||||||
retryInterval*: Duration = DefaultRetryInterval
|
retryInterval*: Duration = DefaultRetryInterval
|
||||||
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
|
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
|
||||||
|
lastInclusion*: Moment # time at which we last included a block into our wantlist
|
||||||
|
|
||||||
proc updatePendingBlockGauge(p: PendingBlocksManager) =
|
proc updatePendingBlockGauge(p: PendingBlocksManager) =
|
||||||
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
|
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
|
||||||
|
|
||||||
proc getWantHandle*(
|
proc getWantHandle*(
|
||||||
self: PendingBlocksManager, address: BlockAddress, inFlight = false
|
self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
|
||||||
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
|
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
|
||||||
## Add an event for a block
|
## Add an event for a block
|
||||||
##
|
##
|
||||||
@ -65,11 +66,13 @@ proc getWantHandle*(
|
|||||||
do:
|
do:
|
||||||
let blk = BlockReq(
|
let blk = BlockReq(
|
||||||
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
|
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
|
||||||
inFlight: inFlight,
|
requested: requested,
|
||||||
blockRetries: self.blockRetries,
|
blockRetries: self.blockRetries,
|
||||||
startTime: getMonoTime().ticks,
|
startTime: getMonoTime().ticks,
|
||||||
)
|
)
|
||||||
self.blocks[address] = blk
|
self.blocks[address] = blk
|
||||||
|
self.lastInclusion = Moment.now()
|
||||||
|
|
||||||
let handle = blk.handle
|
let handle = blk.handle
|
||||||
|
|
||||||
proc cleanUpBlock(data: pointer) {.raises: [].} =
|
proc cleanUpBlock(data: pointer) {.raises: [].} =
|
||||||
@ -86,9 +89,22 @@ proc getWantHandle*(
|
|||||||
return handle
|
return handle
|
||||||
|
|
||||||
proc getWantHandle*(
|
proc getWantHandle*(
|
||||||
self: PendingBlocksManager, cid: Cid, inFlight = false
|
self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
|
||||||
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
|
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
|
||||||
self.getWantHandle(BlockAddress.init(cid), inFlight)
|
self.getWantHandle(BlockAddress.init(cid), requested)
|
||||||
|
|
||||||
|
proc completeWantHandle*(
|
||||||
|
self: PendingBlocksManager, address: BlockAddress, blk: Block
|
||||||
|
) {.raises: [].} =
|
||||||
|
## Complete a pending want handle
|
||||||
|
self.blocks.withValue(address, blockReq):
|
||||||
|
if not blockReq[].handle.finished:
|
||||||
|
trace "Completing want handle from provided block", address
|
||||||
|
blockReq[].handle.complete(blk)
|
||||||
|
else:
|
||||||
|
trace "Want handle already completed", address
|
||||||
|
do:
|
||||||
|
trace "No pending want handle found for address", address
|
||||||
|
|
||||||
proc resolve*(
|
proc resolve*(
|
||||||
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
|
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
|
||||||
@ -108,9 +124,6 @@ proc resolve*(
|
|||||||
blockReq.handle.complete(bd.blk)
|
blockReq.handle.complete(bd.blk)
|
||||||
|
|
||||||
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
|
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
|
||||||
|
|
||||||
if retrievalDurationUs > 500000:
|
|
||||||
warn "High block retrieval time", retrievalDurationUs, address = bd.address
|
|
||||||
else:
|
else:
|
||||||
trace "Block handle already finished", address = bd.address
|
trace "Block handle already finished", address = bd.address
|
||||||
|
|
||||||
@ -128,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
|
|||||||
self.blocks.withValue(address, pending):
|
self.blocks.withValue(address, pending):
|
||||||
result = pending[].blockRetries <= 0
|
result = pending[].blockRetries <= 0
|
||||||
|
|
||||||
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
|
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
|
||||||
## Set inflight status for a block
|
## Check if a block has been requested from a peer
|
||||||
|
##
|
||||||
|
result = false
|
||||||
|
self.blocks.withValue(address, pending):
|
||||||
|
result = pending[].requested.isSome
|
||||||
|
|
||||||
|
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
|
||||||
|
## Returns the peer this block was requested from
|
||||||
|
##
|
||||||
|
result = PeerId.none
|
||||||
|
self.blocks.withValue(address, pending):
|
||||||
|
result = pending[].requested
|
||||||
|
|
||||||
|
proc markRequested*(
|
||||||
|
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
|
||||||
|
): bool =
|
||||||
|
## Marks this block as having been requested from a peer
|
||||||
##
|
##
|
||||||
|
|
||||||
self.blocks.withValue(address, pending):
|
if self.isRequested(address):
|
||||||
pending[].inFlight = inFlight
|
return false
|
||||||
|
|
||||||
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
|
|
||||||
## Check if a block is in flight
|
|
||||||
##
|
|
||||||
|
|
||||||
self.blocks.withValue(address, pending):
|
self.blocks.withValue(address, pending):
|
||||||
result = pending[].inFlight
|
pending[].requested = peer.some
|
||||||
|
return true
|
||||||
|
|
||||||
|
proc clearRequest*(
|
||||||
|
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
|
||||||
|
) =
|
||||||
|
self.blocks.withValue(address, pending):
|
||||||
|
if peer.isSome:
|
||||||
|
assert peer == pending[].requested
|
||||||
|
pending[].requested = PeerId.none
|
||||||
|
|
||||||
func contains*(self: PendingBlocksManager, cid: Cid): bool =
|
func contains*(self: PendingBlocksManager, cid: Cid): bool =
|
||||||
BlockAddress.init(cid) in self.blocks
|
BlockAddress.init(cid) in self.blocks
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
## Nim-Codex
|
## Logos Storage
|
||||||
## Copyright (c) 2021 Status Research & Development GmbH
|
## Copyright (c) 2021 Status Research & Development GmbH
|
||||||
## Licensed under either of
|
## Licensed under either of
|
||||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
@ -35,13 +35,14 @@ const
|
|||||||
DefaultMaxInflight* = 100
|
DefaultMaxInflight* = 100
|
||||||
|
|
||||||
type
|
type
|
||||||
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
|
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
|
||||||
BlocksDeliveryHandler* =
|
BlocksDeliveryHandler* =
|
||||||
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
|
||||||
BlockPresenceHandler* =
|
BlockPresenceHandler* =
|
||||||
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
|
proc(peer: PeerId, presence: seq[BlockPresence]) {.async: (raises: []).}
|
||||||
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
|
||||||
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
|
||||||
|
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
|
||||||
|
|
||||||
BlockExcHandlers* = object
|
BlockExcHandlers* = object
|
||||||
onWantList*: WantListHandler
|
onWantList*: WantListHandler
|
||||||
@ -49,6 +50,9 @@ type
|
|||||||
onPresence*: BlockPresenceHandler
|
onPresence*: BlockPresenceHandler
|
||||||
onAccount*: AccountHandler
|
onAccount*: AccountHandler
|
||||||
onPayment*: PaymentHandler
|
onPayment*: PaymentHandler
|
||||||
|
onPeerJoined*: PeerEventHandler
|
||||||
|
onPeerDeparted*: PeerEventHandler
|
||||||
|
onPeerDropped*: PeerEventHandler
|
||||||
|
|
||||||
WantListSender* = proc(
|
WantListSender* = proc(
|
||||||
id: PeerId,
|
id: PeerId,
|
||||||
@ -58,15 +62,20 @@ type
|
|||||||
wantType: WantType = WantType.WantHave,
|
wantType: WantType = WantType.WantHave,
|
||||||
full: bool = false,
|
full: bool = false,
|
||||||
sendDontHave: bool = false,
|
sendDontHave: bool = false,
|
||||||
): Future[void] {.gcsafe.}
|
) {.async: (raises: [CancelledError]).}
|
||||||
WantCancellationSender* =
|
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
|
||||||
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
|
async: (raises: [CancelledError])
|
||||||
BlocksDeliverySender* =
|
.}
|
||||||
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
|
||||||
PresenceSender* =
|
async: (raises: [CancelledError])
|
||||||
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
|
.}
|
||||||
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
|
||||||
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
async: (raises: [CancelledError])
|
||||||
|
.}
|
||||||
|
AccountSender* =
|
||||||
|
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
|
||||||
|
PaymentSender* =
|
||||||
|
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
|
||||||
|
|
||||||
BlockExcRequest* = object
|
BlockExcRequest* = object
|
||||||
sendWantList*: WantListSender
|
sendWantList*: WantListSender
|
||||||
@ -98,7 +107,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
|
|||||||
|
|
||||||
return b.peerId == peer
|
return b.peerId == peer
|
||||||
|
|
||||||
proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
|
proc send*(
|
||||||
|
b: BlockExcNetwork, id: PeerId, msg: pb.Message
|
||||||
|
) {.async: (raises: [CancelledError]).} =
|
||||||
## Send message to peer
|
## Send message to peer
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -106,8 +117,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
|
|||||||
trace "Unable to send, peer not found", peerId = id
|
trace "Unable to send, peer not found", peerId = id
|
||||||
return
|
return
|
||||||
|
|
||||||
let peer = b.peers[id]
|
|
||||||
try:
|
try:
|
||||||
|
let peer = b.peers[id]
|
||||||
|
|
||||||
await b.inflightSema.acquire()
|
await b.inflightSema.acquire()
|
||||||
await peer.send(msg)
|
await peer.send(msg)
|
||||||
except CancelledError as error:
|
except CancelledError as error:
|
||||||
@ -117,7 +129,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
|
|||||||
finally:
|
finally:
|
||||||
b.inflightSema.release()
|
b.inflightSema.release()
|
||||||
|
|
||||||
proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
|
proc handleWantList(
|
||||||
|
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
|
||||||
|
) {.async: (raises: []).} =
|
||||||
## Handle incoming want list
|
## Handle incoming want list
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -133,7 +147,7 @@ proc sendWantList*(
|
|||||||
wantType: WantType = WantType.WantHave,
|
wantType: WantType = WantType.WantHave,
|
||||||
full: bool = false,
|
full: bool = false,
|
||||||
sendDontHave: bool = false,
|
sendDontHave: bool = false,
|
||||||
): Future[void] =
|
) {.async: (raw: true, raises: [CancelledError]).} =
|
||||||
## Send a want message to peer
|
## Send a want message to peer
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -154,14 +168,14 @@ proc sendWantList*(

proc sendWantCancellations*(
    b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
  ## Informs a remote peer that we're no longer interested in a set of blocks
  ##
  await b.sendWantList(id = id, addresses = addresses, cancel = true)

proc handleBlocksDelivery(
    b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
  ## Handle incoming blocks
  ##

@ -170,7 +184,7 @@ proc handleBlocksDelivery(

proc sendBlocksDelivery*(
    b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send blocks to remote
  ##

@ -178,7 +192,7 @@ proc sendBlocksDelivery*(

proc handleBlockPresence(
    b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
  ## Handle block presence
  ##

@ -187,7 +201,7 @@ proc handleBlockPresence(

proc sendBlockPresence*(
    b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send presence to remote
  ##

@ -195,20 +209,24 @@ proc sendBlockPresence*(

proc handleAccount(
    network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} =
) {.async: (raises: []).} =
  ## Handle account info
  ##

  if not network.handlers.onAccount.isNil:
    await network.handlers.onAccount(peer.id, account)

proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
proc sendAccount*(
    b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send account info to remote
  ##

  b.send(id, Message(account: AccountMessage.init(account)))

proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
proc sendPayment*(
    b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send payment to remote
  ##

@ -216,7 +234,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[

proc handlePayment(
    network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
  ## Handle payment
  ##

@ -224,99 +242,123 @@ proc handlePayment(
  await network.handlers.onPayment(peer.id, payment)

proc rpcHandler(
    b: BlockExcNetwork, peer: NetworkPeer, msg: Message
    self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
) {.async: (raises: []).} =
  ## handle rpc messages
  ##
  if msg.wantList.entries.len > 0:
    b.trackedFutures.track(b.handleWantList(peer, msg.wantList))
    self.trackedFutures.track(self.handleWantList(peer, msg.wantList))

  if msg.payload.len > 0:
    b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload))
    self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))

  if msg.blockPresences.len > 0:
    b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences))
    self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))

  if account =? Account.init(msg.account):
    b.trackedFutures.track(b.handleAccount(peer, account))
    self.trackedFutures.track(self.handleAccount(peer, account))

  if payment =? SignedState.init(msg.payment):
    b.trackedFutures.track(b.handlePayment(peer, payment))
    self.trackedFutures.track(self.handlePayment(peer, payment))

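rpcHandler is now non-raising and dispatches each sub-message as a tracked fire-and-forget future rather than awaiting it inline. A toy version of that tracking idea follows; the repo's actual TrackedFutures utility (utils/trackedfutures) differs in detail, and this sketch merely keeps a sequence and cancels whatever is still pending on stop:

import pkg/chronos

type Tracker = ref object
  futs: seq[Future[void]]

proc track(t: Tracker, fut: Future[void]) =
  # Hold on to the future so it can be cancelled or awaited later.
  t.futs.add fut

proc stop(t: Tracker) {.async.} =
  for f in t.futs:
    if not f.finished():
      await f.cancelAndWait()

when isMainModule:
  let t = Tracker()
  t.track(sleepAsync(10.millis))
  waitFor t.stop()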
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
  ## Creates or retrieves a BlockExcNetwork Peer
  ##

  if peer in b.peers:
    return b.peers.getOrDefault(peer, nil)
  if peer in self.peers:
    return self.peers.getOrDefault(peer, nil)

  var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
  var getConn: ConnProvider = proc(): Future[Connection] {.
      async: (raises: [CancelledError])
  .} =
    try:
      trace "Getting new connection stream", peer
      return await b.switch.dial(peer, Codec)
      return await self.switch.dial(peer, Codec)
    except CancelledError as error:
      raise error
    except CatchableError as exc:
      trace "Unable to connect to blockexc peer", exc = exc.msg

  if not isNil(b.getConn):
    getConn = b.getConn
  if not isNil(self.getConn):
    getConn = self.getConn

  let rpcHandler = proc(
      p: NetworkPeer, msg: Message
  ) {.async: (raises: [CatchableError]).} =
    await b.rpcHandler(p, msg)
  let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
    await self.rpcHandler(p, msg)

  # create new pubsub peer
  let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
  debug "Created new blockexc peer", peer

  b.peers[peer] = blockExcPeer
  self.peers[peer] = blockExcPeer

  return blockExcPeer

proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
  ## Perform initial setup, such as want
  ## list exchange
  ##

  discard b.getOrCreatePeer(peer)

proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
  ## Dial a peer
  ##

  if b.isSelf(peer.peerId):
  if self.isSelf(peer.peerId):
    trace "Skipping dialing self", peer = peer.peerId
    return

  if peer.peerId in b.peers:
  if peer.peerId in self.peers:
    trace "Already connected to peer", peer = peer.peerId
    return

  await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
  await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))

proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
proc dropPeer*(
    self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
  trace "Dropping peer", peer

  try:
    if not self.switch.isNil:
      await self.switch.disconnect(peer)
  except CatchableError as error:
    warn "Error attempting to disconnect from peer", peer = peer, error = error.msg

  if not self.handlers.onPeerDropped.isNil:
    await self.handlers.onPeerDropped(peer)

proc handlePeerJoined*(
    self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
  discard self.getOrCreatePeer(peer)
  if not self.handlers.onPeerJoined.isNil:
    await self.handlers.onPeerJoined(peer)

proc handlePeerDeparted*(
    self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
  ## Cleanup disconnected peer
  ##

  trace "Dropping peer", peer
  trace "Cleaning up departed peer", peer
  b.peers.del(peer)
  self.peers.del(peer)
  if not self.handlers.onPeerDeparted.isNil:
    await self.handlers.onPeerDeparted(peer)

method init*(self: BlockExcNetwork) =
method init*(self: BlockExcNetwork) {.raises: [].} =
  ## Perform protocol initialization
  ##

  proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
  proc peerEventHandler(
      peerId: PeerId, event: PeerEvent
  ): Future[void] {.async: (raises: [CancelledError]).} =
    if event.kind == PeerEventKind.Joined:
      self.setupPeer(peerId)
      await self.handlePeerJoined(peerId)
    elif event.kind == PeerEventKind.Left:
      await self.handlePeerDeparted(peerId)
    else:
      self.dropPeer(peerId)
      warn "Unknown peer event", event

  self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
  self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

  proc handler(conn: Connection, proto: string) {.async.} =
  proc handler(
      conn: Connection, proto: string
  ): Future[void] {.async: (raises: [CancelledError]).} =
    let peerId = conn.peerId
    let blockexcPeer = self.getOrCreatePeer(peerId)
    await blockexcPeer.readLoop(conn) # attach read loop

@ -353,26 +395,32 @@ proc new*(
      wantType: WantType = WantType.WantHave,
      full: bool = false,
      sendDontHave: bool = false,
  ): Future[void] {.gcsafe.} =
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)

  proc sendWantCancellations(
      id: PeerId, addresses: seq[BlockAddress]
  ): Future[void] {.gcsafe.} =
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendWantCancellations(id, addresses)

  proc sendBlocksDelivery(
      id: PeerId, blocksDelivery: seq[BlockDelivery]
  ): Future[void] {.gcsafe.} =
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendBlocksDelivery(id, blocksDelivery)

  proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
  proc sendPresence(
      id: PeerId, presence: seq[BlockPresence]
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendBlockPresence(id, presence)

  proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
  proc sendAccount(
      id: PeerId, account: Account
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendAccount(id, account)

  proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
  proc sendPayment(
      id: PeerId, payment: SignedState
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendPayment(id, payment)

  self.request = BlockExcRequest(

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises
{.push raises: [].}
push:
  {.upraises: [].}

import pkg/chronos
import pkg/libp2p
@ -18,6 +16,7 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures

logScope:
  topics = "codex blockexcnetworkpeer"
@ -25,11 +24,9 @@ logScope:
const DefaultYieldInterval = 50.millis

type
  ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
  ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}

  RPCHandler* = proc(
    peer: NetworkPeer, msg: Message
  ): Future[void].Raising(CatchableError) {.gcsafe.}
  RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}

  NetworkPeer* = ref object of RootObj
    id*: PeerId
@ -37,56 +34,68 @@ type
    sendConn: Connection
    getConn: ConnProvider
    yieldInterval*: Duration = DefaultYieldInterval
    trackedFutures: TrackedFutures

proc connected*(b: NetworkPeer): bool =
proc connected*(self: NetworkPeer): bool =
  not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)
  not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)

proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
  if isNil(conn):
    trace "No connection to read from", peer = b.id
    trace "No connection to read from", peer = self.id
    return

  trace "Attaching read loop", peer = b.id, connId = conn.oid
  trace "Attaching read loop", peer = self.id, connId = conn.oid
  try:
    var nextYield = Moment.now() + b.yieldInterval
    var nextYield = Moment.now() + self.yieldInterval
    while not conn.atEof or not conn.closed:
      if Moment.now() > nextYield:
        nextYield = Moment.now() + b.yieldInterval
        nextYield = Moment.now() + self.yieldInterval
        trace "Yielding in read loop",
          peer = b.id, nextYield = nextYield, interval = b.yieldInterval
          peer = self.id, nextYield = nextYield, interval = self.yieldInterval
        await sleepAsync(10.millis)

      let
        data = await conn.readLp(MaxMessageSize.int)
        msg = Message.protobufDecode(data).mapFailure().tryGet()
      trace "Received message", peer = b.id, connId = conn.oid
      trace "Received message", peer = self.id, connId = conn.oid
      await b.handler(b, msg)
      await self.handler(self, msg)
  except CancelledError:
    trace "Read loop cancelled"
  except CatchableError as err:
    warn "Exception in blockexc read loop", msg = err.msg
  finally:
    trace "Detaching read loop", peer = b.id, connId = conn.oid
    warn "Detaching read loop", peer = self.id, connId = conn.oid
    if self.sendConn == conn:
      self.sendConn = nil
    await conn.close()

proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
  if b.connected:
    trace "Already connected", peer = b.id, connId = b.sendConn.oid
    return b.sendConn
proc connect*(
    self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
  if self.connected:
    trace "Already connected", peer = self.id, connId = self.sendConn.oid
    return self.sendConn

  b.sendConn = await b.getConn()
  self.sendConn = await self.getConn()
  asyncSpawn b.readLoop(b.sendConn)
  self.trackedFutures.track(self.readLoop(self.sendConn))
  return b.sendConn
  return self.sendConn

proc send*(b: NetworkPeer, msg: Message) {.async.} =
  let conn = await b.connect()
proc send*(
    self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let conn = await self.connect()

  if isNil(conn):
    warn "Unable to get send connection for peer message not sent", peer = b.id
    warn "Unable to get send connection for peer message not sent", peer = self.id
    return

  trace "Sending message", peer = b.id, connId = conn.oid
  trace "Sending message", peer = self.id, connId = conn.oid
  await conn.writeLp(protobufEncode(msg))
  try:
    await conn.writeLp(protobufEncode(msg))
  except CatchableError as err:
    if self.sendConn == conn:
      self.sendConn = nil
    raise newException(LPStreamError, "Failed to send message: " & err.msg)

func new*(
    T: type NetworkPeer,
@ -96,4 +105,9 @@ func new*(
): NetworkPeer =
  doAssert(not isNil(connProvider), "should supply connection provider")

  NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)
  NetworkPeer(
    id: peer,
    getConn: connProvider,
    handler: rpcHandler,
    trackedFutures: TrackedFutures(),
  )
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -25,28 +25,77 @@ import ../../logutils

export payments, nitro

const
  MinRefreshInterval = 1.seconds
  MaxRefreshBackoff = 36 # 36 seconds
  MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message

type BlockExcPeerCtx* = ref object of RootObj
  id*: PeerId
  blocks*: Table[BlockAddress, Presence] # remote peer have list including price
  peerWants*: seq[WantListEntry] # remote peers want lists
  wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
  exchanged*: int # times peer has exchanged with us
  lastExchange*: Moment # last time peer has exchanged with us
  refreshInProgress*: bool # indicates if a refresh is in progress
  lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
  refreshBackoff*: int = 1 # backoff factor for refresh requests
  account*: ?Account # ethereum account of this peer
  paymentChannel*: ?ChannelId # payment channel id
  blocksSent*: HashSet[BlockAddress] # blocks sent to peer
  blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
  lastExchange*: Moment # last time peer has sent us a block
  activityTimeout*: Duration
  lastSentWants*: HashSet[BlockAddress]
    # track what wantList we last sent for delta updates

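lastSentWants enables delta want-list updates: diffing the set sent previously against the current wanted set yields exactly the entries to announce and the entries to cancel. A small illustration with std/sets (the string block ids are stand-ins for BlockAddress values):

import std/sets

let lastSent = toHashSet(["blk-a", "blk-b", "blk-c"])
let wantedNow = toHashSet(["blk-b", "blk-c", "blk-d"])

let toSend = wantedNow - lastSent # newly wanted entries
let toCancel = lastSent - wantedNow # entries to revoke with cancel = true

assert toSend == toHashSet(["blk-d"])
assert toCancel == toHashSet(["blk-a"])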
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
  toSeq(self.blocks.keys)

proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
  self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet

proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
  self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet

proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
  let staleness =
    self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()

  if staleness and self.refreshInProgress:
    trace "Cleaning up refresh state", peer = self.id
    self.refreshInProgress = false
    self.refreshBackoff = 1

  staleness

proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  address in self.blocksSent

proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
  self.blocksSent.incl(address)

proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
  self.blocksSent.excl(address)

proc refreshRequested*(self: BlockExcPeerCtx) =
  trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
  self.refreshInProgress = true
  self.lastRefresh = Moment.now()

proc refreshReplied*(self: BlockExcPeerCtx) =
  self.refreshInProgress = false
  self.lastRefresh = Moment.now()
  self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)

proc havesUpdated(self: BlockExcPeerCtx) =
  self.refreshBackoff = 1

proc wantsUpdated*(self: BlockExcPeerCtx) =
  self.refreshBackoff = 1

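The procs above implement a plain exponential backoff for have-list refreshes: knowledge goes stale after refreshBackoff * MinRefreshInterval, each reply doubles the backoff up to MaxRefreshBackoff, and fresh have/want activity resets it to 1. A compact, self-contained sketch of that arithmetic (module-level vars stand in for the peer context fields):

import pkg/chronos

const
  MinRefreshInterval = 1.seconds
  MaxRefreshBackoff = 36

var
  refreshBackoff = 1
  lastRefresh = Moment.now()

proc isStale(): bool =
  lastRefresh + refreshBackoff * MinRefreshInterval < Moment.now()

proc replied() =
  lastRefresh = Moment.now()
  refreshBackoff = min(refreshBackoff * 2, MaxRefreshBackoff)

when isMainModule:
  # Successive replies push the next refresh out: 2s, 4s, 8s, ... capped at 36s.
  for _ in 0 ..< 7:
    replied()
    echo refreshBackoff, " stale: ", isStale()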
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
  # XXX: this is ugly and inefficient, but since those will typically
  # be used in "joins", it's better to pay the price here and have
  # a linear join than to not do it and have a quadratic join.
  toHashSet(self.blocks.keys.toSeq)

proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  address in self.blocks

func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
  if presence.address notin self.blocks:
    self.havesUpdated()

  self.blocks[presence.address] = presence

func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
    price += precense[].price

  price

proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
  ## Adds a block to the set of blocks that have been requested to this peer
  ## (its request schedule).
  if self.blocksRequested.len == 0:
    self.lastExchange = Moment.now()
  self.blocksRequested.incl(address)

proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
  ## Removes a block from the set of blocks that have been requested to this peer
  ## (its request schedule).
  self.blocksRequested.excl(address)

proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
  let wasRequested = address in self.blocksRequested
  self.blocksRequested.excl(address)
  self.lastExchange = Moment.now()
  wasRequested

proc activityTimer*(
    self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
  ## This is called by the block exchange when a block is scheduled for this peer.
  ## If the peer sends no blocks for a while, it is considered inactive/uncooperative
  ## and the peer is dropped. Note that ANY block that the peer sends will reset this
  ## timer for all blocks.
  ##
  while true:
    let idleTime = Moment.now() - self.lastExchange
    if idleTime > self.activityTimeout:
      return

    await sleepAsync(self.activityTimeout - idleTime)
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,16 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sequtils
import std/tables
import std/algorithm
import std/sequtils

import pkg/upraises

push:
  {.upraises: [].}

import pkg/chronos
import pkg/libp2p

@ -65,21 +62,23 @@ func len*(self: PeerCtxStore): int =
  self.peers.len

func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
  toSeq(self.peers.values).filterIt(address in it.peerHave)

func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  # FIXME: this is way slower and can end up leading to unexpected performance loss.
  toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))

func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
  toSeq(self.peers.values).filterIt(address in it.wantedBlocks)

func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
  # FIXME: this is way slower and can end up leading to unexpected performance loss.
  toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))

proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
  var res: PeersForBlock = (@[], @[])
  for peer in self:
    if peer.peerHave.anyIt(it == address):
    if address in peer:
      res.with.add(peer)
    else:
      res.without.add(peer)
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,7 +9,6 @@

import std/hashes
import std/sequtils
import pkg/stew/endians2

import message

@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate

proc hash*(a: BlockAddress): Hash =
  if a.leaf:
    let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
    hash(data)
  else:
    hash(a.cid.data.buffer)

proc hash*(e: WantListEntry): Hash =
  hash(e.address)

@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes
# Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
@ -25,11 +25,15 @@ type

  WantListEntry* = object
    address*: BlockAddress
    # XXX: I think explicit priority is pointless as the peer will request
    # the blocks in the order it wants to receive them, and all we have to
    # do is process those in the same order as we send them back. It also
    # complicates things for no reason at the moment, as the priority is
    # always set to 0.
    priority*: int32 # The priority (normalized). default to 1
    cancel*: bool # Whether this revokes an entry
    wantType*: WantType # Note: defaults to enum 0, ie Block
    sendDontHave*: bool # Note: defaults to false
    inFlight*: bool # Whether block sending is in progress. Not serialized.

  WantList* = object
    entries*: seq[WantListEntry] # A list of wantList entries
@ -97,7 +101,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
  pb.write(field, ipb)

proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
  var ipb = initProtoBuffer(maxSize = MaxBlockSize)
  var ipb = initProtoBuffer()
  ipb.write(1, value.blk.cid.data.buffer)
  ipb.write(2, value.blk.data)
  ipb.write(3, value.address)
@ -128,7 +132,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
  pb.write(field, ipb)

proc protobufEncode*(value: Message): seq[byte] =
  var ipb = initProtoBuffer(maxSize = MaxMessageSize)
  var ipb = initProtoBuffer()
  ipb.write(1, value.wantList)
  for v in value.payload:
    ipb.write(3, v)
@ -254,16 +258,14 @@ proc decode*(
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
  var
    value = Message()
    pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
    pb = initProtoBuffer(msg)
    ipb: ProtoBuffer
    sublist: seq[seq[byte]]
  if ?pb.getField(1, ipb):
    value.wantList = ?WantList.decode(ipb)
  if ?pb.getRepeatedField(3, sublist):
    for item in sublist:
      value.payload.add(
        ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
      )
      value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
  if ?pb.getRepeatedField(4, sublist):
    for item in sublist:
      value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes.
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md

syntax = "proto3";
@ -1,8 +1,9 @@
{.push raises: [].}

import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc

export AccountMessage
@ -11,9 +12,6 @@ export StateChannelUpdate
export stint
export nitro

push:
  {.upraises: [].}

type Account* = object
  address*: EthAddress

@ -1,8 +1,9 @@
{.push raises: [].}

import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc

import ../../blocktype
@ -11,9 +12,6 @@ export questionable
export stint
export BlockPresenceType

upraises.push:
  {.upraises: [].}

type
  PresenceMessage* = blockexc.BlockPresence
  Presence* = object
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,16 +9,14 @@

import std/tables
import std/sugar
import std/hashes

export tables

import pkg/upraises
{.push raises: [], gcsafe.}

push:
  {.upraises: [].}

import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/stew/[byteutils, endians2]
import pkg/questionable
import pkg/questionable/results

@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
  else:
    "cid: " & $a.cid

proc hash*(a: BlockAddress): Hash =
  if a.leaf:
    let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
    hash(data)
  else:
    hash(a.cid.data.buffer)

proc cidOrTreeCid*(a: BlockAddress): Cid =
  if a.leaf: a.treeCid else: a.cid

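The hash above (relocated here from the protobuf module) keys leaf addresses on the tree CID bytes concatenated with the 8-byte big-endian leaf index, so different leaves of the same tree hash differently. A standalone sketch of the same construction with placeholder buffers:

import std/hashes
import pkg/stew/endians2

proc leafHash(treeBytes: seq[byte], index: uint64): Hash =
  # Same recipe as above: tree CID bytes & big-endian index bytes.
  hash(treeBytes & @(index.toBytesBE))

when isMainModule:
  # Stand-in for a real Cid's data.buffer bytes.
  let tree = @[byte 0x01, 0x71, 0x12, 0x20]
  assert leafHash(tree, 0) != leafHash(tree, 1)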

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@

# TODO: This is super inefficient and needs a rewrite, but it'll do for now

import pkg/upraises
{.push raises: [], gcsafe.}

push:
  {.upraises: [].}

import pkg/questionable
import pkg/questionable/results
@ -28,8 +25,11 @@ const DefaultChunkSize* = DefaultBlockSize

type
  # default reader type
  ChunkerError* = object of CatchableError
  ChunkBuffer* = ptr UncheckedArray[byte]
  Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
  Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
    async: (raises: [ChunkerError, CancelledError])
  .}

  # Reader that splits input data into fixed-size chunks
  Chunker* = ref object
@ -74,7 +74,7 @@ proc new*(

  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async, raises: [Defect].} =
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    var res = 0
    try:
      while res < len:
@ -85,7 +85,7 @@ proc new*(
        raise error
      except LPStreamError as error:
        error "LPStream error", err = error.msg
        raise error
        raise newException(ChunkerError, "LPStream error", error)
      except CatchableError as exc:
        error "CatchableError exception", exc = exc.msg
        raise newException(Defect, exc.msg)
@ -102,7 +102,7 @@ proc new*(

  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async, raises: [Defect].} =
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    var total = 0
    try:
      while total < len:
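With Reader now a typed async closure, any chunk source is just a proc matching that signature. A hypothetical in-memory reader shows the shape (the type definitions are repeated locally so the snippet stands alone):

import pkg/chronos

type
  ChunkerError = object of CatchableError
  ChunkBuffer = ptr UncheckedArray[byte]
  Reader = proc(data: ChunkBuffer, len: int): Future[int] {.
    async: (raises: [ChunkerError, CancelledError])
  .}

proc memReader(src: seq[byte]): Reader =
  var pos = 0
  return proc(data: ChunkBuffer, len: int): Future[int] {.
      async: (raises: [ChunkerError, CancelledError])
  .} =
    # Copy up to `len` bytes from the captured buffer, advancing `pos`.
    let n = min(len, src.len - pos)
    for i in 0 ..< n:
      data[i] = src[pos + i]
    pos += n
    return n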

@ -1,6 +1,7 @@
{.push raises: [].}

import pkg/chronos
import pkg/stew/endians2
import pkg/upraises
import pkg/stint

type
@ -8,10 +9,12 @@ type
  SecondsSince1970* = int64
  Timeout* = object of CatchableError

method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} =
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
  raiseAssert "not implemented"

method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
method waitUntil*(
    clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
  raiseAssert "not implemented"

method start*(clock: Clock) {.base, async.} =

102 codex/codex.nim
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,6 +12,7 @@ import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net

import pkg/chronos
import pkg/taskpools
@ -21,14 +22,13 @@ import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2

import ./node
import ./conf
import ./rng
import ./rng as random
import ./rest/api
import ./stores
import ./slots
@ -56,10 +56,21 @@ type
    codexNode: CodexNodeRef
    repoStore: RepoStore
    maintenance: BlockMaintainer
    taskpool: Taskpool
    isStarted: bool

  CodexPrivateKey* = libp2p.PrivateKey # alias
  EthWallet = ethers.Wallet

func config*(self: CodexServer): CodexConf =
  return self.config

func node*(self: CodexServer): CodexNodeRef =
  return self.codexNode

func repoStore*(self: CodexServer): RepoStore =
  return self.repoStore

proc waitForSync(provider: Provider): Future[void] {.async.} =
  var sleepTime = 1
  trace "Checking sync state of Ethereum provider..."
@ -83,7 +94,9 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
    error "Persistence enabled, but no Ethereum account was set"
    quit QuitFailure

  let provider = JsonRpcProvider.new(config.ethProvider)
  let provider = JsonRpcProvider.new(
    config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
  )
  await waitForSync(provider)
  var signer: Signer
  if account =? config.ethAccount:
@ -103,7 +116,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
      quit QuitFailure
    signer = wallet

  let deploy = Deployment.new(provider, config)
  let deploy = Deployment.new(provider, config.marketplaceAddress)
  without marketplaceAddress =? await deploy.address(Marketplace):
    error "No Marketplace address was specified or there is no known address for the current network"
    quit QuitFailure
@ -125,7 +138,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =

  # This is used for simulation purposes. Normal nodes won't be compiled with this flag
  # and hence the proof failure will always be 0.
  when codex_enable_proof_failures:
  when storage_enable_proof_failures:
    let proofFailures = config.simulateProofFailures
    if proofFailures > 0:
      warn "Enabling proof failure simulation!"
@ -134,6 +147,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
    if config.simulateProofFailures > 0:
      warn "Proof failure simulation is not enabled for this build! Configuration ignored"

  if error =? (await market.loadConfig()).errorOption:
    fatal "Cannot load market configuration", error = error.msg
    quit QuitFailure

  let purchasing = Purchasing.new(market, clock)
  let sales = Sales.new(market, clock, repo, proofFailures)
  client = some ClientInteractions.new(clock, purchasing)
@ -152,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
  s.codexNode.contracts = (client, host, validator)

proc start*(s: CodexServer) {.async.} =
  trace "Starting codex node", config = $s.config
  if s.isStarted:
    warn "Storage server already started, skipping"
    return

  trace "Starting Storage node", config = $s.config
  await s.repoStore.start()

  s.maintenance.start()

  await s.codexNode.switch.start()
@ -168,18 +189,55 @@ proc start*(s: CodexServer) {.async.} =

  await s.bootstrapInteractions()
  await s.codexNode.start()
  s.restServer.start()

  if s.restServer != nil:
    s.restServer.start()

  s.isStarted = true

proc stop*(s: CodexServer) {.async.} =
  notice "Stopping codex node"
  await allFuturesThrowing(
    s.restServer.stop(),
    s.codexNode.switch.stop(),
    s.codexNode.stop(),
    s.repoStore.stop(),
    s.maintenance.stop(),
  )
  if not s.isStarted:
    warn "Storage is not started"
    return

  notice "Stopping Storage node"

  var futures =
    @[
      s.codexNode.switch.stop(),
      s.codexNode.stop(),
      s.repoStore.stop(),
      s.maintenance.stop(),
    ]

  if s.restServer != nil:
    futures.add(s.restServer.stop())

  let res = await noCancel allFinishedFailed[void](futures)

  if res.failure.len > 0:
    error "Failed to stop Storage node", failures = res.failure.len
    raiseAssert "Failed to stop Storage node"

proc close*(s: CodexServer) {.async.} =
  var futures = @[s.codexNode.close(), s.repoStore.close()]

  let res = await noCancel allFinishedFailed[void](futures)

  if not s.taskpool.isNil:
    try:
      s.taskpool.shutdown()
    except Exception as exc:
      error "Failed to stop the taskpool", failures = res.failure.len
      raiseAssert("Failure in taskpool shutdown:" & exc.msg)

  if res.failure.len > 0:
    error "Failed to close Storage node", failures = res.failure.len
    raiseAssert "Failed to close Storage node"

proc shutdown*(server: CodexServer) {.async.} =
  await server.stop()
  await server.close()

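stop and close now gather the component futures, wait for all of them even when some fail, and shield teardown from cancellation with noCancel before asserting on failures. allFinishedFailed is the project's own helper; a rough equivalent with plain chronos allFinished looks like this:

import pkg/chronos

proc stopAll(futures: seq[Future[void]]) {.async.} =
  # Wait for every future to finish, shielded from external cancellation.
  let done = await noCancel allFinished(futures)

  var failures = 0
  for fut in done:
    if fut.failed():
      inc failures

  if failures > 0:
    raiseAssert "failed to stop " & $failures & " component(s)"

when isMainModule:
  waitFor stopAll(@[sleepAsync(1.millis), sleepAsync(2.millis)])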
proc new*(
    T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
@ -189,13 +247,13 @@ proc new*(
    .new()
    .withPrivateKey(privateKey)
    .withAddresses(config.listenAddrs)
    .withRng(Rng.instance())
    .withRng(random.Rng.instance())
    .withNoise()
    .withMplex(5.minutes, 5.minutes)
    .withMaxConnections(config.maxPeers)
    .withAgentVersion(config.agentString)
    .withSignedPeerRecord(true)
    .withTcpTransport({ServerFlags.ReuseAddr})
    .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
    .build()

  var
@ -279,7 +337,7 @@ proc new*(
    )

    peerStore = PeerCtxStore.new()
    pendingBlocks = PendingBlocksManager.new()
    pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
    advertiser = Advertiser.new(repoStore, discovery)
    blockDiscovery =
      DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
@ -304,10 +362,13 @@ proc new*(
    taskPool = taskpool,
  )

  var restServer: RestServerRef = nil

  if config.apiBindAddress.isSome:
    restServer = RestServerRef
      .new(
        codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
        initTAddress(config.apiBindAddress, config.apiPort),
        initTAddress(config.apiBindAddress.get(), config.apiPort),
        bufferSize = (1024 * 64),
        maxRequestBodySize = int.high,
      )
@ -321,4 +382,5 @@ proc new*(
    restServer: restServer,
    repoStore: repoStore,
    maintenance: maintenance,
    taskpool: taskpool,
  )

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
203 codex/conf.nim
@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
{.pop.}

import std/options
import std/parseutils
import std/strutils
import std/typetraits
import std/net

import pkg/chronos
import pkg/chronicles/helpers
@ -27,13 +29,12 @@ import pkg/confutils/std/net
import pkg/toml_serialization
import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64

import ./codextypes
import ./discovery
@ -44,15 +45,16 @@ import ./utils
import ./nat
import ./utils/natutils

from ./contracts/config import DefaultRequestCacheSize
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries

export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots

export
  DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
  DefaultRequestCacheSize
  DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries

type ThreadCount* = distinct Natural

@ -61,21 +63,19 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string =
  let dataDir =
    when defined(windows):
      "AppData" / "Roaming" / "Codex"
      "AppData" / "Roaming" / "Storage"
    elif defined(macosx):
      "Library" / "Application Support" / "Codex"
      "Library" / "Application Support" / "Storage"
    else:
      ".cache" / "codex"
      ".cache" / "storage"

  getHomeDir() / dataDir

const
  codex_enable_api_debug_peers* {.booldefine.} = false
  storage_enable_api_debug_peers* {.booldefine.} = false
  codex_enable_proof_failures* {.booldefine.} = false
  storage_enable_proof_failures* {.booldefine.} = false
  codex_enable_log_counter* {.booldefine.} = false
  storage_enable_log_counter* {.booldefine.} = false

  DefaultDataDir* = defaultDataDir()
  DefaultCircuitDir* = defaultDataDir() / "circuits"
  DefaultThreadCount* = ThreadCount(0)

type
@ -137,9 +137,9 @@ type
    .}: Port

    dataDir* {.
      desc: "The directory where codex will store configuration and data",
      desc: "The directory where Storage will store configuration and data",
      defaultValue: DefaultDataDir,
      defaultValue: defaultDataDir(),
      defaultValueDesc: $DefaultDataDir,
      defaultValueDesc: "",
      abbr: "d",
      name: "data-dir"
    .}: OutDir
@ -198,14 +198,16 @@ type
    .}: ThreadCount

    agentString* {.
      defaultValue: "Codex",
      defaultValue: "Logos Storage",
      desc: "Node agent string which is used as identifier in network",
      name: "agent-string"
    .}: string

    apiBindAddress* {.
      desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
    .}: string
      desc: "The REST API bind address",
      defaultValue: "127.0.0.1".some,
      name: "api-bindaddr"
    .}: Option[string]

apiPort* {.
|
apiPort* {.
|
||||||
desc: "The REST Api port",
|
desc: "The REST Api port",
|
||||||
@ -263,6 +265,13 @@ type
|
|||||||
name: "block-mn"
|
name: "block-mn"
|
||||||
.}: int
|
.}: int
|
||||||
|
|
||||||
|
blockRetries* {.
|
||||||
|
desc: "Number of times to retry fetching a block before giving up",
|
||||||
|
defaultValue: DefaultBlockRetries,
|
||||||
|
defaultValueDesc: $DefaultBlockRetries,
|
||||||
|
name: "block-retries"
|
||||||
|
.}: int
|
||||||
|
|
||||||
cacheSize* {.
|
cacheSize* {.
|
||||||
desc:
|
desc:
|
||||||
"The size of the block cache, 0 disables the cache - " &
|
"The size of the block cache, 0 disables the cache - " &
|
||||||
@ -370,34 +379,43 @@ type
|
|||||||
hidden
|
hidden
|
||||||
.}: uint16
|
.}: uint16
|
||||||
|
|
||||||
|
maxPriorityFeePerGas* {.
|
||||||
|
desc:
|
||||||
|
"Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
|
||||||
|
defaultValue: DefaultMaxPriorityFeePerGas,
|
||||||
|
defaultValueDesc: $DefaultMaxPriorityFeePerGas,
|
||||||
|
name: "max-priority-fee-per-gas",
|
||||||
|
hidden
|
||||||
|
.}: uint64
|
||||||
|
|
||||||
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
|
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
|
||||||
of PersistenceCmd.prover:
|
of PersistenceCmd.prover:
|
||||||
circuitDir* {.
|
circuitDir* {.
|
||||||
desc: "Directory where Codex will store proof circuit data",
|
desc: "Directory where Storage will store proof circuit data",
|
||||||
defaultValue: DefaultCircuitDir,
|
defaultValue: defaultDataDir() / "circuits",
|
||||||
defaultValueDesc: $DefaultCircuitDir,
|
defaultValueDesc: "data/circuits",
|
||||||
abbr: "cd",
|
abbr: "cd",
|
||||||
name: "circuit-dir"
|
name: "circuit-dir"
|
||||||
.}: OutDir
|
.}: OutDir
|
||||||
|
|
||||||
circomR1cs* {.
|
circomR1cs* {.
|
||||||
desc: "The r1cs file for the storage circuit",
|
desc: "The r1cs file for the storage circuit",
|
||||||
defaultValue: $DefaultCircuitDir / "proof_main.r1cs",
|
defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
|
||||||
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs",
|
defaultValueDesc: "data/circuits/proof_main.r1cs",
|
||||||
name: "circom-r1cs"
|
name: "circom-r1cs"
|
||||||
.}: InputFile
|
.}: InputFile
|
||||||
|
|
||||||
circomWasm* {.
|
circomWasm* {.
|
||||||
desc: "The wasm file for the storage circuit",
|
desc: "The wasm file for the storage circuit",
|
||||||
defaultValue: $DefaultCircuitDir / "proof_main.wasm",
|
defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
|
||||||
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm",
|
defaultValueDesc: "data/circuits/proof_main.wasm",
|
||||||
name: "circom-wasm"
|
name: "circom-wasm"
|
||||||
.}: InputFile
|
.}: InputFile
|
||||||
|
|
||||||
circomZkey* {.
|
circomZkey* {.
|
||||||
desc: "The zkey file for the storage circuit",
|
desc: "The zkey file for the storage circuit",
|
||||||
defaultValue: $DefaultCircuitDir / "proof_main.zkey",
|
defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
|
||||||
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey",
|
defaultValueDesc: "data/circuits/proof_main.zkey",
|
||||||
name: "circom-zkey"
|
name: "circom-zkey"
|
||||||
.}: InputFile
|
.}: InputFile
|
||||||
|
|
||||||
@ -467,7 +485,7 @@ func prover*(self: CodexConf): bool =
|
|||||||
self.persistence and self.persistenceCmd == PersistenceCmd.prover
|
self.persistence and self.persistenceCmd == PersistenceCmd.prover
|
||||||
|
|
||||||
proc getCodexVersion(): string =
|
proc getCodexVersion(): string =
|
||||||
let tag = strip(staticExec("git tag"))
|
let tag = strip(staticExec("git describe --tags --abbrev=0"))
|
||||||
if tag.isEmptyOrWhitespace:
|
if tag.isEmptyOrWhitespace:
|
||||||
return "untagged build"
|
return "untagged build"
|
||||||
return tag
|
return tag
|
||||||
@ -477,76 +495,100 @@ proc getCodexRevision(): string =
|
|||||||
var res = strip(staticExec("git rev-parse --short HEAD"))
|
var res = strip(staticExec("git rev-parse --short HEAD"))
|
||||||
return res
|
return res
|
||||||
|
|
||||||
|
proc getCodexContractsRevision(): string =
|
||||||
|
let res =
|
||||||
|
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
|
||||||
|
return res
|
||||||
|
|
||||||
proc getNimBanner(): string =
|
proc getNimBanner(): string =
|
||||||
staticExec("nim --version | grep Version")
|
staticExec("nim --version | grep Version")
|
||||||
|
|
||||||
const
|
const
|
||||||
codexVersion* = getCodexVersion()
|
codexVersion* = getCodexVersion()
|
||||||
codexRevision* = getCodexRevision()
|
codexRevision* = getCodexRevision()
|
||||||
|
codexContractsRevision* = getCodexContractsRevision()
|
||||||
nimBanner* = getNimBanner()
|
nimBanner* = getNimBanner()
|
||||||
|
|
||||||
codexFullVersion* =
|
codexFullVersion* =
|
||||||
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
|
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
|
||||||
nimBanner
|
"\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
|
||||||
|
|
||||||
proc parseCmdArg*(
|
proc parseCmdArg*(
|
||||||
T: typedesc[MultiAddress], input: string
|
T: typedesc[MultiAddress], input: string
|
||||||
): MultiAddress {.upraises: [ValueError].} =
|
): MultiAddress {.raises: [ValueError].} =
|
||||||
var ma: MultiAddress
|
var ma: MultiAddress
|
||||||
try:
|
try:
|
||||||
let res = MultiAddress.init(input)
|
let res = MultiAddress.init(input)
|
||||||
if res.isOk:
|
if res.isOk:
|
||||||
ma = res.get()
|
ma = res.get()
|
||||||
else:
|
else:
|
||||||
warn "Invalid MultiAddress", input = input, error = res.error()
|
fatal "Invalid MultiAddress", input = input, error = res.error()
|
||||||
quit QuitFailure
|
quit QuitFailure
|
||||||
except LPError as exc:
|
except LPError as exc:
|
||||||
warn "Invalid MultiAddress uri", uri = input, error = exc.msg
|
fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
|
||||||
quit QuitFailure
|
quit QuitFailure
|
||||||
ma
|
ma
|
||||||
|
|
||||||
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} =
|
proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
|
||||||
let count = parseInt(input)
|
try:
|
||||||
if count != 0 and count < 2:
|
let count = parseInt(p)
|
||||||
warn "Invalid number of threads", input = input
|
if count != 0 and count < 2:
|
||||||
quit QuitFailure
|
return err("Invalid number of threads: " & p)
|
||||||
ThreadCount(count)
|
return ok(ThreadCount(count))
|
||||||
|
except ValueError as e:
|
||||||
|
return err("Invalid number of threads: " & p & ", error=" & e.msg)
|
||||||
|
|
||||||
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
|
proc parseCmdArg*(T: type ThreadCount, input: string): T =
|
||||||
|
let val = ThreadCount.parse(input)
|
||||||
|
if val.isErr:
|
||||||
|
fatal "Cannot parse the thread count.", input = input, error = val.error()
|
||||||
|
quit QuitFailure
|
||||||
|
return val.get()
|
||||||
|
|
||||||
|
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
|
||||||
var res: SignedPeerRecord
|
var res: SignedPeerRecord
|
||||||
try:
|
try:
|
||||||
if not res.fromURI(uri):
|
if not res.fromURI(p):
|
||||||
warn "Invalid SignedPeerRecord uri", uri = uri
|
return err("The uri is not a valid SignedPeerRecord: " & p)
|
||||||
quit QuitFailure
|
return ok(res)
|
||||||
except LPError as exc:
|
except LPError, Base64Error:
|
||||||
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
|
let e = getCurrentException()
|
||||||
quit QuitFailure
|
return err(e.msg)
|
||||||
except CatchableError as exc:
|
|
||||||
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
|
|
||||||
quit QuitFailure
|
|
||||||
res
|
|
||||||
|
|
||||||
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
|
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
|
||||||
|
let res = SignedPeerRecord.parse(uri)
|
||||||
|
if res.isErr:
|
||||||
|
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
|
||||||
|
quit QuitFailure
|
||||||
|
return res.get()
|
||||||
|
|
||||||
|
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
|
||||||
case p.toLowerAscii
|
case p.toLowerAscii
|
||||||
of "any":
|
of "any":
|
||||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
|
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
|
||||||
of "none":
|
of "none":
|
||||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
|
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
|
||||||
of "upnp":
|
of "upnp":
|
||||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
|
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
|
||||||
of "pmp":
|
of "pmp":
|
||||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
|
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
|
||||||
else:
|
else:
|
||||||
if p.startsWith("extip:"):
|
if p.startsWith("extip:"):
|
||||||
try:
|
try:
|
||||||
let ip = parseIpAddress(p[6 ..^ 1])
|
let ip = parseIpAddress(p[6 ..^ 1])
|
||||||
NatConfig(hasExtIp: true, extIp: ip)
|
return ok(NatConfig(hasExtIp: true, extIp: ip))
|
||||||
except ValueError:
|
except ValueError:
|
||||||
let error = "Not a valid IP address: " & p[6 ..^ 1]
|
let error = "Not a valid IP address: " & p[6 ..^ 1]
|
||||||
raise newException(ValueError, error)
|
return err(error)
|
||||||
else:
|
else:
|
||||||
let error = "Not a valid NAT option: " & p
|
return err("Not a valid NAT option: " & p)
|
||||||
raise newException(ValueError, error)
|
|
||||||
|
proc parseCmdArg*(T: type NatConfig, p: string): T =
|
||||||
|
let res = NatConfig.parse(p)
|
||||||
|
if res.isErr:
|
||||||
|
fatal "Cannot parse the NAT config.", error = res.error(), input = p
|
||||||
|
quit QuitFailure
|
||||||
|
return res.get()
|
||||||
|
|
||||||
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
|
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
|
||||||
return @[]
|
return @[]
|
||||||
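The pattern running through these hunks is the same everywhere: validation is split into a pure `parse` that returns a `Result` and never touches the process, and a thin `parseCmdArg` wrapper that logs `fatal` and quits. That keeps the parsing logic unit-testable. A self-contained sketch of the pattern, assuming the nim-results package; `Percentage` and its bounds are hypothetical, only the two-layer shape is taken from the diff:

```nim
import std/strutils
import pkg/results

type Percentage = distinct int

proc parse(T: type Percentage, p: string): Result[Percentage, string] =
  ## Pure, testable half: returns Result, no logging, no process exit.
  try:
    let v = parseInt(p)
    if v < 0 or v > 100:
      return err("percentage out of range: " & p)
    ok(Percentage(v))
  except ValueError as e:
    err("not a number: " & p & ", error=" & e.msg)

proc parseCmdArg(T: type Percentage, input: string): Percentage =
  ## CLI-facing half: unwraps or exits, like the conf.nim wrappers above.
  let res = Percentage.parse(input)
  if res.isErr:
    stderr.writeLine "cannot parse percentage: " & res.error()
    quit QuitFailure
  res.get()
```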
@@ -554,25 +596,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
 proc parseCmdArg*(T: type EthAddress, address: string): T =
   EthAddress.init($address).get()

-proc parseCmdArg*(T: type NBytes, val: string): T =
+func parse*(T: type NBytes, p: string): Result[NBytes, string] =
   var num = 0'i64
-  let count = parseSize(val, num, alwaysBin = true)
+  let count = parseSize(p, num, alwaysBin = true)
   if count == 0:
-    warn "Invalid number of bytes", nbytes = val
+    return err("Invalid number of bytes: " & p)
+  return ok(NBytes(num))
+
+proc parseCmdArg*(T: type NBytes, val: string): T =
+  let res = NBytes.parse(val)
+  if res.isErr:
+    fatal "Cannot parse NBytes.", error = res.error(), input = val
     quit QuitFailure
-  NBytes(num)
+  return res.get()

 proc parseCmdArg*(T: type Duration, val: string): T =
   var dur: Duration
   let count = parseDuration(val, dur)
   if count == 0:
-    warn "Cannot parse duration", dur = dur
+    fatal "Cannot parse duration", dur = dur
     quit QuitFailure
   dur

 proc readValue*(
     r: var TomlReader, val: var EthAddress
-) {.upraises: [SerializationError, IOError].} =
+) {.raises: [SerializationError, IOError].} =
   val = EthAddress.init(r.readValue(string)).get()

 proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@@ -583,7 +631,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
   try:
     val = SignedPeerRecord.parseCmdArg(uri)
   except LPError as err:
-    warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
+    fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
     quit QuitFailure

 proc readValue*(r: var TomlReader, val: var MultiAddress) =
@@ -595,12 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
   if res.isOk:
     val = res.get()
   else:
-    warn "Invalid MultiAddress", input = input, error = res.error()
+    fatal "Invalid MultiAddress", input = input, error = res.error()
     quit QuitFailure

 proc readValue*(
     r: var TomlReader, val: var NBytes
-) {.upraises: [SerializationError, IOError].} =
+) {.raises: [SerializationError, IOError].} =
   var value = 0'i64
   var str = r.readValue(string)
   let count = parseSize(str, value, alwaysBin = true)
@@ -611,7 +659,7 @@ proc readValue*(

 proc readValue*(
     r: var TomlReader, val: var ThreadCount
-) {.upraises: [SerializationError, IOError].} =
+) {.raises: [SerializationError, IOError].} =
   var str = r.readValue(string)
   try:
     val = parseCmdArg(ThreadCount, str)
@@ -620,7 +668,7 @@ proc readValue*(

 proc readValue*(
     r: var TomlReader, val: var Duration
-) {.upraises: [SerializationError, IOError].} =
+) {.raises: [SerializationError, IOError].} =
   var str = r.readValue(string)
   var dur: Duration
   let count = parseDuration(str, dur)
@@ -687,7 +735,7 @@ proc stripAnsi*(v: string): string =

   res

-proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
+proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
   # Updates log levels (without clearing old ones)
   let directives = logLevel.split(";")
   try:
@@ -756,7 +804,7 @@ proc setupLogging*(conf: CodexConf) =
     of LogKind.None:
       noOutput

-  when codex_enable_log_counter:
+  when storage_enable_log_counter:
     var counter = 0.uint64
     proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
       inc(counter)
@@ -767,15 +815,6 @@ proc setupLogging*(conf: CodexConf) =
   else:
     defaultChroniclesStream.outputs[0].writer = writer

-  try:
-    updateLogLevel(conf.logLevel)
-  except ValueError as err:
-    try:
-      stderr.write "Invalid value for --log-level. " & err.msg & "\n"
-    except IOError:
-      echo "Invalid value for --log-level. " & err.msg
-    quit QuitFailure
-
 proc setupMetrics*(config: CodexConf) =
   if config.metricsEnabled:
     let metricsAddress = config.metricsAddress
codex/contentids_exts.nim (new file, 8 lines)

@@ -0,0 +1,8 @@
+const ContentIdsExts = [
+  multiCodec("codex-root"),
+  multiCodec("codex-manifest"),
+  multiCodec("codex-block"),
+  multiCodec("codex-slot-root"),
+  multiCodec("codex-proving-root"),
+  multiCodec("codex-slot-cell"),
+]
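The new module gathers the multicodec identifiers that mark Codex-specific content ids in one table. A hedged sketch of how such a list can classify an incoming codec; note the `codex-*` names resolve only against this project's extended multicodec table, and `isStorageContentId` is an illustrative helper, not part of the diff:

```nim
import pkg/libp2p/multicodec

const ContentIdsExts = [
  multiCodec("codex-root"),
  multiCodec("codex-manifest"),
  multiCodec("codex-block"),
]

proc isStorageContentId(mc: MultiCodec): bool =
  ## True when the codec is one of the content-id extensions.
  mc in ContentIdsExts

echo isStorageContentId(multiCodec("codex-root")) # true
echo isStorageContentId(multiCodec("raw"))        # false
```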
@@ -1,13 +1,13 @@
-Codex Contracts in Nim
+Logos Storage Contracts in Nim
 =======================

-Nim API for the [Codex smart contracts][1].
+Nim API for the [Logos Storage smart contracts][1].

 Usage
 -----

 For a global overview of the steps involved in starting and fulfilling a
-storage contract, see [Codex Contracts][1].
+storage contract, see [Logos Storage Contracts][1].

 Smart contract
 --------------
@@ -144,5 +144,5 @@ await storage
   .markProofAsMissing(id, period)
 ```

-[1]: https://github.com/status-im/codex-contracts-eth/
-[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
+[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
+[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md
@@ -1,3 +1,5 @@
+{.push raises: [].}
+
 import std/times
 import pkg/ethers
 import pkg/questionable
@@ -5,6 +7,7 @@ import pkg/chronos
 import pkg/stint
 import ../clock
 import ../conf
+import ../utils/trackedfutures

 export clock

@@ -18,9 +21,12 @@ type OnChainClock* = ref object of Clock
   blockNumber: UInt256
   started: bool
   newBlock: AsyncEvent
+  trackedFutures: TrackedFutures

 proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
-  OnChainClock(provider: provider, newBlock: newAsyncEvent())
+  OnChainClock(
+    provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
+  )

 proc update(clock: OnChainClock, blck: Block) =
   if number =? blck.number and number > clock.blockNumber:
@@ -32,15 +38,12 @@ proc update(clock: OnChainClock, blck: Block) =
       blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
     clock.newBlock.fire()

-proc update(clock: OnChainClock) {.async.} =
+proc update(clock: OnChainClock) {.async: (raises: []).} =
   try:
     if latest =? (await clock.provider.getBlock(BlockTag.latest)):
       clock.update(latest)
-  except CancelledError as error:
-    raise error
   except CatchableError as error:
     debug "error updating clock: ", error = error.msg
-    discard

 method start*(clock: OnChainClock) {.async.} =
   if clock.started:
@@ -52,7 +55,7 @@ method start*(clock: OnChainClock) {.async.} =
     return

   # ignore block parameter; hardhat may call this with pending blocks
-  asyncSpawn clock.update()
+  clock.trackedFutures.track(clock.update())

   await clock.update()

@@ -64,13 +67,16 @@ method stop*(clock: OnChainClock) {.async.} =
     return

   await clock.subscription.unsubscribe()
+  await clock.trackedFutures.cancelTracked()
   clock.started = false

 method now*(clock: OnChainClock): SecondsSince1970 =
   doAssert clock.started, "clock should be started before calling now()"
   return toUnix(getTime() + clock.offset)

-method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
+method waitUntil*(
+    clock: OnChainClock, time: SecondsSince1970
+) {.async: (raises: [CancelledError]).} =
   while (let difference = time - clock.now(); difference > 0):
     clock.newBlock.clear()
     discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))
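Instead of fire-and-forget `asyncSpawn`, the clock now registers its background future with `TrackedFutures`, so `stop` can cancel it deterministically. A minimal chronos sketch of the same lifecycle, assuming a simple seq-based tracker (the repo's real `TrackedFutures` does more bookkeeping):

```nim
import pkg/chronos

type Tracker = ref object
  futures: seq[Future[void]]

proc track(t: Tracker, fut: Future[void]) =
  t.futures.add fut

proc cancelTracked(t: Tracker) {.async.} =
  ## Cancel every background future and wait for each to settle,
  ## mirroring `trackedFutures.cancelTracked()` in the hunk above.
  for fut in t.futures:
    if not fut.finished:
      await fut.cancelAndWait()
  t.futures.setLen 0

proc ticker() {.async.} =
  while true:
    await sleepAsync(1.seconds)

when isMainModule:
  let t = Tracker()
  t.track ticker()           # background work, no asyncSpawn leak
  waitFor sleepAsync(10.milliseconds)
  waitFor t.cancelTracked()  # deterministic shutdown
```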
@@ -1,10 +1,11 @@
 import pkg/contractabi
-import pkg/ethers/fields
+import pkg/ethers/contracts/fields
 import pkg/questionable/results

 export contractabi

 const DefaultRequestCacheSize* = 128.uint16
+const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64

 type
   MarketplaceConfig* = object
@@ -9,7 +9,7 @@ import ./marketplace

 type Deployment* = ref object
   provider: Provider
-  config: CodexConf
+  marketplaceAddressOverride: ?Address

 const knownAddresses = {
   # Hardhat localhost network
@@ -18,9 +18,12 @@ const knownAddresses = {
   # Taiko Alpha-3 Testnet
   "167005":
     {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
-  # Codex Testnet - Feb 25 2025 07:24:19 AM (+00:00 UTC)
+  # Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
   "789987":
-    {"Marketplace": Address.init("0xfFaF679D5Cbfdd5Dbc9Be61C616ed115DFb597ed")}.toTable,
+    {"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
+  # Linea (Status)
+  "1660990954":
+    {"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
 }.toTable

 proc getKnownAddress(T: type, chainId: UInt256): ?Address =
@@ -32,12 +35,16 @@ proc getKnownAddress(T: type, chainId: UInt256): ?Address =

   return knownAddresses[id].getOrDefault($T, Address.none)

-proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
-  Deployment(provider: provider, config: config)
+proc new*(
+    _: type Deployment,
+    provider: Provider,
+    marketplaceAddressOverride: ?Address = none Address,
+): Deployment =
+  Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)

 proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
   when contract is Marketplace:
-    if address =? deployment.config.marketplaceAddress:
+    if address =? deployment.marketplaceAddressOverride:
       return some address

     let chainId = await deployment.provider.getChainId()
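The constructor no longer takes the whole `CodexConf`; it accepts only an optional marketplace address override, decoupling deployment lookup from CLI configuration. A hedged std-lib sketch of the resulting lookup order (override wins, then the known-address table; the string types stand in for `?Address`):

```nim
import std/[options, tables]

type Deployment = ref object
  override: Option[string]

const knownAddresses = {
  "789987": "0x5378a4EA5dA2a548ce22630A3AE74b052000C62D"
}.toTable

proc address(d: Deployment, chainId: string): Option[string] =
  ## Override wins; otherwise fall back to the known-address table.
  if d.override.isSome:
    return d.override
  if chainId in knownAddresses:
    return some knownAddresses[chainId]
  none string
```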
@@ -1,6 +1,6 @@
+import std/strformat
 import std/strutils
 import pkg/ethers
-import pkg/upraises
 import pkg/questionable
 import pkg/lrucache
 import ../utils/exceptions
@@ -22,6 +22,7 @@ type
     rewardRecipient: ?Address
     configuration: ?MarketplaceConfig
     requestCache: LruCache[string, StorageRequest]
+    allowanceLock: AsyncLock

   MarketSubscription = market.Subscription
   EventSubscription = ethers.Subscription
@@ -49,130 +50,195 @@ func new*(
 proc raiseMarketError(message: string) {.raises: [MarketError].} =
   raise newException(MarketError, message)

-template convertEthersError(body) =
+func prefixWith(suffix, prefix: string, separator = ": "): string =
+  if prefix.len > 0:
+    return &"{prefix}{separator}{suffix}"
+  else:
+    return suffix
+
+template convertEthersError(msg: string = "", body) =
   try:
     body
   except EthersError as error:
-    raiseMarketError(error.msgDetail)
+    raiseMarketError(error.msgDetail.prefixWith(msg))

-proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
+proc config(
+    market: OnChainMarket
+): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
   without resolvedConfig =? market.configuration:
-    let fetchedConfig = await market.contract.configuration()
-    market.configuration = some fetchedConfig
-    return fetchedConfig
+    if err =? (await market.loadConfig()).errorOption:
+      raiseMarketError(err.msg)
+
+    without config =? market.configuration:
+      raiseMarketError("Failed to access to config from the Marketplace contract")
+
+    return config
+
   return resolvedConfig

-proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
+template withAllowanceLock*(market: OnChainMarket, body: untyped) =
+  if market.allowanceLock.isNil:
+    market.allowanceLock = newAsyncLock()
+  await market.allowanceLock.acquire()
+  try:
+    body
+  finally:
+    try:
+      market.allowanceLock.release()
+    except AsyncLockError as error:
+      raise newException(Defect, error.msg, error)
+
+proc approveFunds(
+    market: OnChainMarket, amount: UInt256
+) {.async: (raises: [CancelledError, MarketError]).} =
   debug "Approving tokens", amount
-  convertEthersError:
+  convertEthersError("Failed to approve funds"):
     let tokenAddress = await market.contract.token()
     let token = Erc20Token.new(tokenAddress, market.signer)
-    discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)
+    let owner = await market.signer.getAddress()
+    let spender = market.contract.address
+    market.withAllowanceLock:
+      let allowance = await token.allowance(owner, spender)
+      discard await token.approve(spender, allowance + amount).confirm(1)
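`approveFunds` switches from `increaseAllowance` to a read-modify-write `allowance` + `approve` pair, which races if two tasks approve concurrently; the `withAllowanceLock` template serializes that critical section behind a lazily created chronos `AsyncLock`. A minimal sketch of the same guard under simplified names (the integer stands in for the ERC-20 allowance slot; only the lock shape follows the hunk above):

```nim
import pkg/chronos

var allowanceLock: AsyncLock

template withLock(body: untyped) =
  if allowanceLock.isNil:
    allowanceLock = newAsyncLock()
  await allowanceLock.acquire()
  try:
    body
  finally:
    allowanceLock.release()

var allowance = 0 # stands in for the on-chain allowance value

proc approve(amount: int) {.async.} =
  withLock:
    let current = allowance            # read
    await sleepAsync(1.milliseconds)   # contention window
    allowance = current + amount       # modify-write, now race-free

when isMainModule:
  waitFor allFutures(approve(5), approve(7))
  assert allowance == 12 # without the lock, one update could be lost
```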

-method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
+method loadConfig*(
+    market: OnChainMarket
+): Future[?!void] {.async: (raises: [CancelledError]).} =
+  try:
+    without config =? market.configuration:
+      let fetchedConfig = await market.contract.configuration()
+
+      market.configuration = some fetchedConfig
+
+    return success()
+  except EthersError as err:
+    return failure newException(
+      MarketError,
+      "Failed to fetch the config from the Marketplace contract: " & err.msg,
+    )
+
+method getZkeyHash*(
+    market: OnChainMarket
+): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
   let config = await market.config()
   return some config.proofs.zkeyHash

-method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
-  convertEthersError:
+method getSigner*(
+    market: OnChainMarket
+): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to get signer address"):
     return await market.signer.getAddress()

-method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
-  convertEthersError:
+method periodicity*(
+    market: OnChainMarket
+): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to get Marketplace config"):
     let config = await market.config()
     let period = config.proofs.period
     return Periodicity(seconds: period)

-method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} =
-  convertEthersError:
+method proofTimeout*(
+    market: OnChainMarket
+): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to get Marketplace config"):
     let config = await market.config()
     return config.proofs.timeout

-method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} =
-  convertEthersError:
+method repairRewardPercentage*(
+    market: OnChainMarket
+): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to get Marketplace config"):
     let config = await market.config()
     return config.collateral.repairRewardPercentage

 method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get Marketplace config"):
     let config = await market.config()
     return config.requestDurationLimit

-method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
-  convertEthersError:
+method proofDowntime*(
+    market: OnChainMarket
+): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to get Marketplace config"):
     let config = await market.config()
     return config.proofs.downtime

 method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get slot pointer"):
     let overrides = CallOverrides(blockTag: some BlockTag.pending)
     return await market.contract.getPointer(slotId, overrides)

 method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get my requests"):
     return await market.contract.myRequests

 method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get my slots"):
     let slots = await market.contract.mySlots()
     debug "Fetched my slots", numSlots = len(slots)

     return slots

-method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
-  convertEthersError:
+method requestStorage(
+    market: OnChainMarket, request: StorageRequest
+) {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to request storage"):
     debug "Requesting storage"
     await market.approveFunds(request.totalPrice())
     discard await market.contract.requestStorage(request).confirm(1)

 method getRequest*(
     market: OnChainMarket, id: RequestId
-): Future[?StorageRequest] {.async.} =
-  let key = $id
-
-  if market.requestCache.contains(key):
-    return some market.requestCache[key]
-
-  convertEthersError:
-    try:
-      let request = await market.contract.getRequest(id)
-      market.requestCache[key] = request
-      return some request
-    except Marketplace_UnknownRequest:
-      return none StorageRequest
+): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
+  try:
+    let key = $id
+
+    if key in market.requestCache:
+      return some market.requestCache[key]
+
+    let request = await market.contract.getRequest(id)
+    market.requestCache[key] = request
+    return some request
+  except Marketplace_UnknownRequest, KeyError:
+    warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
+    return none StorageRequest
+  except EthersError as e:
+    error "Cannot retrieve the request", error = e.msg
+    return none StorageRequest
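`getRequest` memoizes immutable on-chain requests in the `LruCache` keyed by request id; note the new `KeyError` branch, since a bounded cache may evict a key between the membership check and the read. A small stand-in sketch of that defensive lookup using a plain table (hypothetical `getCached`, not part of the diff):

```nim
import std/[options, tables]

# Stand-in for pkg/lrucache: a bounded cache can drop a key at any time,
# so the lookup defends against KeyError instead of trusting `in` alone.
proc getCached(cache: var Table[string, string], id: string): Option[string] =
  try:
    if id in cache:
      return some cache[id]
    none string
  except KeyError:
    none string # evicted between check and read: treat as a miss
```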

 method requestState*(
     market: OnChainMarket, requestId: RequestId
 ): Future[?RequestState] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get request state"):
     try:
       let overrides = CallOverrides(blockTag: some BlockTag.pending)
       return some await market.contract.requestState(requestId, overrides)
     except Marketplace_UnknownRequest:
       return none RequestState

-method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} =
-  convertEthersError:
+method slotState*(
+    market: OnChainMarket, slotId: SlotId
+): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
     let overrides = CallOverrides(blockTag: some BlockTag.pending)
     return await market.contract.slotState(slotId, overrides)

 method getRequestEnd*(
     market: OnChainMarket, id: RequestId
 ): Future[SecondsSince1970] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get request end"):
     return await market.contract.requestEnd(id)

 method requestExpiresAt*(
     market: OnChainMarket, id: RequestId
 ): Future[SecondsSince1970] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get request expiry"):
     return await market.contract.requestExpiry(id)

 method getHost(
     market: OnChainMarket, requestId: RequestId, slotIndex: uint64
-): Future[?Address] {.async.} =
-  convertEthersError:
+): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to get slot's host"):
     let slotId = slotId(requestId, slotIndex)
     let address = await market.contract.getHost(slotId)
     if address != Address.default:
@@ -182,12 +248,12 @@ method getHost(

 method currentCollateral*(
     market: OnChainMarket, slotId: SlotId
-): Future[UInt256] {.async.} =
-  convertEthersError:
+): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
+  convertEthersError("Failed to get slot's current collateral"):
     return await market.contract.currentCollateral(slotId)

 method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get active slot"):
     try:
       return some await market.contract.getActiveSlot(slotId)
     except Marketplace_SlotIsFree:
@@ -199,42 +265,88 @@ method fillSlot(
     slotIndex: uint64,
     proof: Groth16Proof,
     collateral: UInt256,
-) {.async.} =
-  convertEthersError:
+) {.async: (raises: [CancelledError, MarketError]).} =
+  convertEthersError("Failed to fill slot"):
     logScope:
       requestId
       slotIndex

-    await market.approveFunds(collateral)
-    trace "calling fillSlot on contract"
-    discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
-    trace "fillSlot transaction completed"
-
-method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
-  convertEthersError:
-    var freeSlot: Future[Confirmable]
-    if rewardRecipient =? market.rewardRecipient:
-      # If --reward-recipient specified, use it as the reward recipient, and use
-      # the SP's address as the collateral recipient
-      let collateralRecipient = await market.getSigner()
-      freeSlot = market.contract.freeSlot(
-        slotId,
-        rewardRecipient, # --reward-recipient
-        collateralRecipient,
-      ) # SP's address
-    else:
-      # Otherwise, use the SP's address as both the reward and collateral
-      # recipient (the contract will use msg.sender for both)
-      freeSlot = market.contract.freeSlot(slotId)
-
-    discard await freeSlot.confirm(1)
+    try:
+      await market.approveFunds(collateral)
+
+      # Add 10% to gas estimate to deal with different evm code flow when we
+      # happen to be the last one to fill a slot in this request
+      trace "estimating gas for fillSlot"
+      let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
+      let gasLimit = (gas * 110) div 100
+      let overrides = TransactionOverrides(gasLimit: some gasLimit)
+
+      trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
+      discard await market.contract
+        .fillSlot(requestId, slotIndex, proof, overrides)
+        .confirm(1)
+      trace "fillSlot transaction completed"
+    except Marketplace_SlotNotFree as parent:
+      raise newException(
+        SlotStateMismatchError, "Failed to fill slot because the slot is not free",
+        parent,
+      )
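A pattern shared by these marketplace calls: instead of a hard-coded gas limit, the transaction now estimates gas and adds fixed headroom, because the EVM takes a more expensive code path for whichever caller happens to trigger the extra state transition (10% for `fillSlot`, 25% for `reserveSlot`, 50% for `markProofAsMissing`, 200% for `freeSlot`). A tiny sketch of the integer headroom arithmetic as used in these hunks (`uint64` swapped in for `UInt256` to keep it self-contained):

```nim
proc withHeadroom(gasEstimate: uint64, percent: uint64): uint64 =
  ## (gas * (100 + percent)) div 100, e.g. 10% headroom on 100_000 -> 110_000
  (gasEstimate * (100 + percent)) div 100

assert withHeadroom(100_000, 10) == 110_000   # fillSlot
assert withHeadroom(100_000, 25) == 125_000   # reserveSlot
assert withHeadroom(100_000, 50) == 150_000   # markProofAsMissing
assert withHeadroom(100_000, 200) == 300_000  # freeSlot (gas * 3)
```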
|
|
||||||
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
|
method freeSlot*(
|
||||||
convertEthersError:
|
market: OnChainMarket, slotId: SlotId
|
||||||
|
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||||
|
convertEthersError("Failed to free slot"):
|
||||||
|
try:
|
||||||
|
var freeSlot: Future[Confirmable]
|
||||||
|
if rewardRecipient =? market.rewardRecipient:
|
||||||
|
# If --reward-recipient specified, use it as the reward recipient, and use
|
||||||
|
# the SP's address as the collateral recipient
|
||||||
|
let collateralRecipient = await market.getSigner()
|
||||||
|
|
||||||
|
# Add 200% to gas estimate to deal with different evm code flow when we
|
||||||
|
# happen to be the one to make the request fail
|
||||||
|
let gas = await market.contract.estimateGas.freeSlot(
|
||||||
|
slotId, rewardRecipient, collateralRecipient
|
||||||
|
)
|
||||||
|
let gasLimit = gas * 3
|
||||||
|
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||||
|
|
||||||
|
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||||
|
|
||||||
|
freeSlot = market.contract.freeSlot(
|
||||||
|
slotId,
|
||||||
|
rewardRecipient, # --reward-recipient
|
||||||
|
collateralRecipient, # SP's address
|
||||||
|
overrides,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Otherwise, use the SP's address as both the reward and collateral
|
||||||
|
# recipient (the contract will use msg.sender for both)
|
||||||
|
|
||||||
|
# Add 200% to gas estimate to deal with different evm code flow when we
|
||||||
|
# happen to be the one to make the request fail
|
||||||
|
let gas = await market.contract.estimateGas.freeSlot(slotId)
|
||||||
|
let gasLimit = gas * 3
|
||||||
|
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
|
||||||
|
|
||||||
|
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||||
|
|
||||||
|
freeSlot = market.contract.freeSlot(slotId, overrides)
|
||||||
|
|
||||||
|
discard await freeSlot.confirm(1)
|
||||||
|
except Marketplace_SlotIsFree as parent:
|
||||||
|
raise newException(
|
||||||
|
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
|
||||||
|
)
|
||||||
|
|
||||||
|
method withdrawFunds(
|
||||||
|
market: OnChainMarket, requestId: RequestId
|
||||||
|
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||||
|
convertEthersError("Failed to withdraw funds"):
|
||||||
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
||||||
|
|
||||||
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError("Failed to get proof requirement"):
|
||||||
try:
|
try:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
return await market.contract.isProofRequired(id, overrides)
|
return await market.contract.isProofRequired(id, overrides)
|
||||||
@ -242,7 +354,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async
|
|||||||
return false
|
return false
|
||||||
|
|
||||||
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError("Failed to get future proof requirement"):
|
||||||
try:
|
try:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
return await market.contract.willProofBeRequired(id, overrides)
|
return await market.contract.willProofBeRequired(id, overrides)
|
||||||
@ -252,28 +364,42 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a
|
|||||||
method getChallenge*(
|
method getChallenge*(
|
||||||
market: OnChainMarket, id: SlotId
|
market: OnChainMarket, id: SlotId
|
||||||
): Future[ProofChallenge] {.async.} =
|
): Future[ProofChallenge] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError("Failed to get proof challenge"):
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
return await market.contract.getChallenge(id, overrides)
|
return await market.contract.getChallenge(id, overrides)
|
||||||
|
|
||||||
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
|
method submitProof*(
|
||||||
convertEthersError:
|
market: OnChainMarket, id: SlotId, proof: Groth16Proof
|
||||||
discard await market.contract.submitProof(id, proof).confirm(1)
|
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||||
|
convertEthersError("Failed to submit proof"):
|
||||||
|
try:
|
||||||
|
discard await market.contract.submitProof(id, proof).confirm(1)
|
||||||
|
except Proofs_InvalidProof as parent:
|
||||||
|
raise newException(
|
||||||
|
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
|
||||||
|
)
|
||||||
|
|
||||||
method markProofAsMissing*(
|
method markProofAsMissing*(
|
||||||
market: OnChainMarket, id: SlotId, period: Period
|
market: OnChainMarket, id: SlotId, period: Period
|
||||||
) {.async.} =
|
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||||
convertEthersError:
|
convertEthersError("Failed to mark proof as missing"):
|
||||||
discard await market.contract.markProofAsMissing(id, period).confirm(1)
|
# Add 50% to gas estimate to deal with different evm code flow when we
|
||||||
|
# happen to be the one to make the request fail
|
||||||
|
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
|
||||||
|
let gasLimit = (gas * 150) div 100
|
||||||
|
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||||
|
|
||||||
method canProofBeMarkedAsMissing*(
|
trace "calling markProofAsMissing on contract",
|
||||||
|
estimatedGas = gas, gasLimit = gasLimit
|
||||||
|
|
||||||
|
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
|
||||||
|
|
||||||
|
method canMarkProofAsMissing*(
|
||||||
market: OnChainMarket, id: SlotId, period: Period
|
market: OnChainMarket, id: SlotId, period: Period
|
||||||
): Future[bool] {.async.} =
|
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||||
let provider = market.contract.provider
|
|
||||||
let contractWithoutSigner = market.contract.connect(provider)
|
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
|
||||||
try:
|
try:
|
||||||
discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
|
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
|
||||||
return true
|
return true
|
||||||
except EthersError as e:
|
except EthersError as e:
|
||||||
trace "Proof cannot be marked as missing", msg = e.msg
|
trace "Proof cannot be marked as missing", msg = e.msg
|
||||||
@ -281,48 +407,56 @@ method canProofBeMarkedAsMissing*(
|
|||||||
|
|
||||||
method reserveSlot*(
|
method reserveSlot*(
|
||||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||||
) {.async.} =
|
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||||
convertEthersError:
|
convertEthersError("Failed to reserve slot"):
|
||||||
discard await market.contract
|
try:
|
||||||
.reserveSlot(
|
# Add 25% to gas estimate to deal with different evm code flow when we
|
||||||
requestId,
|
# happen to be the last one that is allowed to reserve the slot
|
||||||
slotIndex,
|
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
|
||||||
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
|
let gasLimit = (gas * 125) div 100
|
||||||
TransactionOverrides(gasLimit: some 100000.u256),
|
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||||
)
|
|
||||||
.confirm(1)
|
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||||
|
|
||||||
|
discard
|
||||||
|
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
|
||||||
|
except SlotReservations_ReservationNotAllowed:
|
||||||
|
raise newException(
|
||||||
|
SlotReservationNotAllowedError,
|
||||||
|
"Failed to reserve slot because reservation is not allowed",
|
||||||
|
)
|
||||||
|
|
||||||
 method canReserveSlot*(
   market: OnChainMarket, requestId: RequestId, slotIndex: uint64
 ): Future[bool] {.async.} =
-  convertEthersError:
+  convertEthersError("Unable to determine if slot can be reserved"):
     return await market.contract.canReserveSlot(requestId, slotIndex)

 method subscribeRequests*(
   market: OnChainMarket, callback: OnRequest
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
+  proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in Request subscription", msg = eventErr.msg
       return

     callback(event.requestId, event.ask, event.expiry)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to StorageRequested events"):
     let subscription = await market.contract.subscribe(StorageRequested, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeSlotFilled*(
   market: OnChainMarket, callback: OnSlotFilled
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
+  proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in SlotFilled subscription", msg = eventErr.msg
       return

     callback(event.requestId, event.slotIndex)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to SlotFilled events"):
     let subscription = await market.contract.subscribe(SlotFilled, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)
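Every subscribe method in this file repeats the same error-first callback shape: unwrap the event result, log and bail on failure, and only then invoke the user callback. A simplified standalone model of that shape (the types here are stand-ins, not the codex ones):

```nim
# Simplified stand-in for the onEvent pattern used by every subscribe
# method above: check the result, log-and-return on error, otherwise
# forward the payload to the caller's callback.
type EventResult = object
  ok: bool
  errMsg: string
  requestId: int

proc onEvent(res: EventResult, callback: proc (id: int)) =
  if not res.ok:
    echo "There was an error in subscription: ", res.errMsg
    return
  callback(res.requestId)

onEvent(EventResult(ok: true, requestId: 7), proc (id: int) = echo "event ", id)
```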
@@ -336,27 +470,27 @@ method subscribeSlotFilled*(
     if eventRequestId == requestId and eventSlotIndex == slotIndex:
       callback(requestId, slotIndex)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to SlotFilled events"):
     return await market.subscribeSlotFilled(onSlotFilled)

 method subscribeSlotFreed*(
   market: OnChainMarket, callback: OnSlotFreed
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
+  proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in SlotFreed subscription", msg = eventErr.msg
       return

     callback(event.requestId, event.slotIndex)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to SlotFreed events"):
     let subscription = await market.contract.subscribe(SlotFreed, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeSlotReservationsFull*(
   market: OnChainMarket, callback: OnSlotReservationsFull
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
+  proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in SlotReservationsFull subscription",
         msg = eventErr.msg

@@ -364,28 +498,28 @@ method subscribeSlotReservationsFull*(

     callback(event.requestId, event.slotIndex)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to SlotReservationsFull events"):
     let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeFulfillment(
   market: OnChainMarket, callback: OnFulfillment
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
+  proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
       return

     callback(event.requestId)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to RequestFulfilled events"):
     let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeFulfillment(
   market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
+  proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
       return

@@ -393,28 +527,28 @@ method subscribeFulfillment(
     if event.requestId == requestId:
       callback(event.requestId)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to RequestFulfilled events"):
     let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeRequestCancelled*(
   market: OnChainMarket, callback: OnRequestCancelled
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
+  proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in RequestCancelled subscription", msg = eventErr.msg
       return

     callback(event.requestId)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to RequestCancelled events"):
     let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeRequestCancelled*(
   market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
+  proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in RequestCancelled subscription", msg = eventErr.msg
       return

@@ -422,28 +556,28 @@ method subscribeRequestCancelled*(
     if event.requestId == requestId:
       callback(event.requestId)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to RequestCancelled events"):
     let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeRequestFailed*(
   market: OnChainMarket, callback: OnRequestFailed
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
+  proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in RequestFailed subscription", msg = eventErr.msg
       return

     callback(event.requestId)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to RequestFailed events"):
     let subscription = await market.contract.subscribe(RequestFailed, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeRequestFailed*(
   market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
+  proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in RequestFailed subscription", msg = eventErr.msg
       return

@@ -451,21 +585,21 @@ method subscribeRequestFailed*(
     if event.requestId == requestId:
       callback(event.requestId)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to RequestFailed events"):
     let subscription = await market.contract.subscribe(RequestFailed, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

 method subscribeProofSubmission*(
   market: OnChainMarket, callback: OnProofSubmitted
 ): Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
+  proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
     without event =? eventResult, eventErr:
       error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
       return

     callback(event.id)

-  convertEthersError:
+  convertEthersError("Failed to subscribe to ProofSubmitted events"):
     let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
     return OnChainMarketSubscription(eventSubscription: subscription)

@@ -475,13 +609,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
 method queryPastSlotFilledEvents*(
   market: OnChainMarket, fromBlock: BlockTag
 ): Future[seq[SlotFilled]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get past SlotFilled events from block"):
     return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)

 method queryPastSlotFilledEvents*(
   market: OnChainMarket, blocksAgo: int
 ): Future[seq[SlotFilled]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get past SlotFilled events"):
     let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

     return await market.queryPastSlotFilledEvents(fromBlock)

@@ -489,21 +623,58 @@ method queryPastSlotFilledEvents*(
 method queryPastSlotFilledEvents*(
   market: OnChainMarket, fromTime: SecondsSince1970
 ): Future[seq[SlotFilled]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get past SlotFilled events from time"):
     let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
     return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))

 method queryPastStorageRequestedEvents*(
   market: OnChainMarket, fromBlock: BlockTag
 ): Future[seq[StorageRequested]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get past StorageRequested events from block"):
     return
       await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)

 method queryPastStorageRequestedEvents*(
   market: OnChainMarket, blocksAgo: int
 ): Future[seq[StorageRequested]] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get past StorageRequested events"):
     let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

     return await market.queryPastStorageRequestedEvents(fromBlock)

+method slotCollateral*(
+    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
+): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
+  let slotid = slotId(requestId, slotIndex)
+
+  try:
+    let slotState = await market.slotState(slotid)
+
+    without request =? await market.getRequest(requestId):
+      return failure newException(
+        MarketError, "Failure calculating the slotCollateral, cannot get the request"
+      )
+
+    return market.slotCollateral(request.ask.collateralPerSlot, slotState)
+  except MarketError as error:
+    error "Error when trying to calculate the slotCollateral", error = error.msg
+    return failure error
+
+method slotCollateral*(
+    market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
+): ?!UInt256 {.raises: [].} =
+  if slotState == SlotState.Repair:
+    without repairRewardPercentage =?
+      market.configuration .? collateral .? repairRewardPercentage:
+      return failure newException(
+        MarketError,
+        "Failure calculating the slotCollateral, cannot get the reward percentage",
+      )
+
+    return success (
+      collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
+        100.u256
+      )
+    )
+
+  return success(collateralPerSlot)
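The repair branch of the new `slotCollateral` overload discounts the collateral by the configured repair reward percentage. The arithmetic is worth making concrete; the sketch below is a standalone model using plain ints in place of UInt256, with a hypothetical percentage value:

```nim
# Standalone model of the repair-collateral discount computed above:
# collateral - collateral * repairRewardPercentage / 100.
# Plain ints stand in for UInt256; the percentage is an assumed config value.
let collateralPerSlot = 1_000
let repairRewardPercentage = 10 # hypothetical, for illustration only

let repairCollateral =
  collateralPerSlot - (collateralPerSlot * repairRewardPercentage) div 100

assert repairCollateral == 900
echo "repair collateral: ", repairCollateral
```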
@@ -51,8 +51,8 @@ type
   Proofs_ProofNotMissing* = object of SolidityError
   Proofs_ProofNotRequired* = object of SolidityError
   Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
-  Proofs_InvalidProbability* = object of SolidityError
   Periods_InvalidSecondsPerPeriod* = object of SolidityError
+  SlotReservations_ReservationNotAllowed* = object of SolidityError

 proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
 proc token*(marketplace: Marketplace): Address {.contract, view.}

@@ -67,7 +67,9 @@ proc requestStorage*(
   errors: [
     Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
     Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
-    Marketplace_InvalidMaxSlotLoss,
+    Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
+    Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
+    Marketplace_InsufficientReward, Marketplace_InvalidCid,
   ]
 .}

@@ -176,6 +178,17 @@ proc markProofAsMissing*(
   ]
 .}

+proc canMarkProofAsMissing*(
+  marketplace: Marketplace, id: SlotId, period: uint64
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
+    Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
+    Proofs_ProofAlreadyMarkedMissing,
+  ]
+.}
+
 proc reserveSlot*(
   marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
 ): Confirmable {.contract.}
@@ -1,6 +1,6 @@
 import pkg/stint
 import pkg/contractabi
-import pkg/ethers/fields
+import pkg/ethers/contracts/fields

 type
   Groth16Proof* = object
@@ -2,14 +2,13 @@ import std/hashes
 import std/sequtils
 import std/typetraits
 import pkg/contractabi
-import pkg/nimcrypto
-import pkg/ethers/fields
+import pkg/nimcrypto/keccak
+import pkg/ethers/contracts/fields
 import pkg/questionable/results
 import pkg/stew/byteutils
 import pkg/libp2p/[cid, multicodec]
 import ../logutils
 import ../utils/json
-import ../clock
 from ../errors import mapFailure

 export contractabi
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -7,14 +7,16 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

+{.push raises: [].}
+
 import std/algorithm
+import std/net
 import std/sequtils

 import pkg/chronos
 import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
 import pkg/questionable
 import pkg/questionable/results
-import pkg/stew/shims/net
 import pkg/contractabi/address as ca
 import pkg/codexdht/discv5/[routing_table, protocol as discv5]
 from pkg/nimcrypto import keccak256

@@ -41,6 +43,7 @@ type Discovery* = ref object of RootObj
   # record to advertice node connection information, this carry any
   # address that the node can be connected on
   dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information
+  isStarted: bool

 proc toNodeId*(cid: Cid): NodeId =
   ## Cid to discovery id

@@ -54,70 +57,122 @@ proc toNodeId*(host: ca.Address): NodeId =

   readUintBE[256](keccak256.digest(host.toArray).data)

-proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
+proc findPeer*(
+    d: Discovery, peerId: PeerId
+): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
   trace "protocol.resolve..."
   ## Find peer using the given Discovery object
   ##
-  let node = await d.protocol.resolve(toNodeId(peerId))
-
-  return
-    if node.isSome():
-      node.get().record.data.some
-    else:
-      PeerRecord.none
+  try:
+    let node = await d.protocol.resolve(toNodeId(peerId))
+
+    return
+      if node.isSome():
+        node.get().record.data.some
+      else:
+        PeerRecord.none
+  except CancelledError as exc:
+    warn "Error finding peer", peerId = peerId, exc = exc.msg
+    raise exc
+  except CatchableError as exc:
+    warn "Error finding peer", peerId = peerId, exc = exc.msg
+
+    return PeerRecord.none

-method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
+method find*(
+    d: Discovery, cid: Cid
+): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
   ## Find block providers
   ##
-  without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
-    warn "Error finding providers for block", cid, error = error.msg
-
-  return providers.filterIt(not (it.data.peerId == d.peerId))
+  try:
+    without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
+      error:
+      warn "Error finding providers for block", cid, error = error.msg
+
+    return providers.filterIt(not (it.data.peerId == d.peerId))
+  except CancelledError as exc:
+    warn "Error finding providers for block", cid, exc = exc.msg
+    raise exc
+  except CatchableError as exc:
+    warn "Error finding providers for block", cid, exc = exc.msg

-method provide*(d: Discovery, cid: Cid) {.async, base.} =
+method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
   ## Provide a block Cid
   ##
-  let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
-
-  if nodes.len <= 0:
-    warn "Couldn't provide to any nodes!"
+  try:
+    let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
+
+    if nodes.len <= 0:
+      warn "Couldn't provide to any nodes!"
+  except CancelledError as exc:
+    warn "Error providing block", cid, exc = exc.msg
+    raise exc
+  except CatchableError as exc:
+    warn "Error providing block", cid, exc = exc.msg

 method find*(
   d: Discovery, host: ca.Address
-): Future[seq[SignedPeerRecord]] {.async, base.} =
+): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
   ## Find host providers
   ##

-  trace "Finding providers for host", host = $host
-  without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
-    error:
-    trace "Error finding providers for host", host = $host, exc = error.msg
-    return
-
-  if providers.len <= 0:
-    trace "No providers found", host = $host
-    return
-
-  providers.sort do(a, b: SignedPeerRecord) -> int:
-    system.cmp[uint64](a.data.seqNo, b.data.seqNo)
-
-  return providers
+  try:
+    trace "Finding providers for host", host = $host
+    without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
+      error:
+      trace "Error finding providers for host", host = $host, exc = error.msg
+      return
+
+    if providers.len <= 0:
+      trace "No providers found", host = $host
+      return
+
+    providers.sort do(a, b: SignedPeerRecord) -> int:
+      system.cmp[uint64](a.data.seqNo, b.data.seqNo)
+
+    return providers
+  except CancelledError as exc:
+    warn "Error finding providers for host", host = $host, exc = exc.msg
+    raise exc
+  except CatchableError as exc:
+    warn "Error finding providers for host", host = $host, exc = exc.msg

-method provide*(d: Discovery, host: ca.Address) {.async, base.} =
+method provide*(
+    d: Discovery, host: ca.Address
+) {.async: (raises: [CancelledError]), base.} =
   ## Provide hosts
   ##

-  trace "Providing host", host = $host
-  let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
-  if nodes.len > 0:
-    trace "Provided to nodes", nodes = nodes.len
+  try:
+    trace "Providing host", host = $host
+    let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
+    if nodes.len > 0:
+      trace "Provided to nodes", nodes = nodes.len
+  except CancelledError as exc:
+    warn "Error providing host", host = $host, exc = exc.msg
+    raise exc
+  except CatchableError as exc:
+    warn "Error providing host", host = $host, exc = exc.msg

-method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
+method removeProvider*(
+    d: Discovery, peerId: PeerId
+): Future[void] {.base, async: (raises: [CancelledError]).} =
   ## Remove provider from providers table
   ##

   trace "Removing provider", peerId
-  d.protocol.removeProvidersLocal(peerId)
+  try:
+    await d.protocol.removeProvidersLocal(peerId)
+  except CancelledError as exc:
+    warn "Error removing provider", peerId = peerId, exc = exc.msg
+    raise exc
+  except CatchableError as exc:
+    warn "Error removing provider", peerId = peerId, exc = exc.msg
+  except Exception as exc: # Something in discv5 is raising Exception
+    warn "Error removing provider", peerId = peerId, exc = exc.msg
+    raiseAssert("Unexpected Exception in removeProvider")

 proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   ## Update providers record
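The recurring try/except shape added throughout discovery is: re-raise `CancelledError` so cancellation keeps propagating, then log and swallow everything else. A standalone model of that shape follows; the `CancelledError` type here is a local stand-in so the snippet runs without chronos:

```nim
# Local stand-in for chronos' CancelledError, so the sketch is self-contained.
type CancelledError = object of CatchableError

proc risky() =
  raise newException(ValueError, "lookup failed")

proc guarded() =
  try:
    risky()
  except CancelledError as exc:
    raise exc              # cancellation must keep propagating to the caller
  except CatchableError as exc:
    echo "warn: ", exc.msg # anything else is logged and swallowed

guarded()
echo "still running"
```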
@@ -125,7 +180,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =

   d.announceAddrs = @addrs

-  trace "Updating announce record", addrs = d.announceAddrs
+  info "Updating announce record", addrs = d.announceAddrs
   d.providerRecord = SignedPeerRecord
     .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
     .expect("Should construct signed record").some

@@ -137,7 +192,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   ## Update providers record
   ##

-  trace "Updating Dht record", addrs = addrs
+  info "Updating Dht record", addrs = addrs
   d.dhtRecord = SignedPeerRecord
     .init(d.key, PeerRecord.init(d.peerId, @addrs))
     .expect("Should construct signed record").some

@@ -145,12 +200,23 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   if not d.protocol.isNil:
     d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")

-proc start*(d: Discovery) {.async.} =
-  d.protocol.open()
-  await d.protocol.start()
+proc start*(d: Discovery) {.async: (raises: []).} =
+  try:
+    d.protocol.open()
+    await d.protocol.start()
+    d.isStarted = true
+  except CatchableError as exc:
+    error "Error starting discovery", exc = exc.msg

-proc stop*(d: Discovery) {.async.} =
-  await d.protocol.closeWait()
+proc stop*(d: Discovery) {.async: (raises: []).} =
+  if not d.isStarted:
+    warn "Discovery not started, skipping stop"
+    return
+
+  try:
+    await noCancel d.protocol.closeWait()
+  except CatchableError as exc:
+    error "Error stopping discovery", exc = exc.msg

 proc new*(
   T: type Discovery,
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -7,10 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/upraises
+{.push raises: [], gcsafe.}

-push:
-  {.upraises: [].}
-
 import ../stores
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -7,10 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/upraises
+{.push raises: [], gcsafe.}

-push:
-  {.upraises: [].}
-
 import std/[sugar, atomics, sequtils]

@@ -25,6 +22,7 @@ import ../logutils
 import ../manifest
 import ../merkletree
 import ../stores
+import ../clock
 import ../blocktype as bt
 import ../utils
 import ../utils/asynciter
@@ -120,19 +118,22 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
   (idx - step) div steps

 proc getPendingBlocks(
-  self: Erasure, manifest: Manifest, indicies: seq[int]
+  self: Erasure, manifest: Manifest, indices: seq[int]
 ): AsyncIter[(?!bt.Block, int)] =
   ## Get pending blocks iterator
   ##
+  var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]

-  var
-    # request blocks from the store
-    pendingBlocks = indicies.map(
-      (i: int) =>
-        self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
-          (r: ?!bt.Block) => (r, i)
-        ) # Get the data blocks (first K)
-    )
+  proc attachIndex(
+      fut: Future[?!bt.Block], i: int
+  ): Future[(?!bt.Block, int)] {.async.} =
+    ## avoids closure capture issues
+    return (await fut, i)
+
+  for blockIndex in indices:
+    # request blocks from the store
+    let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
+    pendingBlocks.add(attachIndex(fut, blockIndex))

   proc isFinished(): bool =
     pendingBlocks.len == 0
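The `attachIndex` helper exists to dodge a classic closure-capture pitfall: closures created inside a loop can share one environment, so deferred evaluation may observe a late value of the loop variable. Routing the index through a proc parameter snapshots it per call. A minimal, synchronous model of the fixed behaviour (this is an illustration, not the codex code):

```nim
# Model of the pitfall `attachIndex` sidesteps: routing the index through a
# proc parameter gives each closure its own copy of the value.
proc attach(x: int): proc (): int =
  # `x` is a fresh parameter cell per call; each returned closure keeps its own
  result = proc (): int = x

var thunks: seq[proc (): int]
for i in 0 ..< 3:
  thunks.add(attach(i))

for t in thunks:
  echo t() # prints 0, 1, 2 thanks to the per-call snapshot
```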
@@ -168,16 +169,16 @@ proc prepareEncodingData(
     strategy = params.strategy.init(
       firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
     )
-    indicies = toSeq(strategy.getIndicies(step))
+    indices = toSeq(strategy.getIndices(step))
     pendingBlocksIter =
-      self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
+      self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))

   var resolved = 0
   for fut in pendingBlocksIter:
     let (blkOrErr, idx) = await fut
     without blk =? blkOrErr, err:
-      warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
-      continue
+      warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
+      return failure(err)

     let pos = indexToPos(params.steps, idx, step)
     shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)

@@ -185,7 +186,7 @@ proc prepareEncodingData(

     resolved.inc()

-  for idx in indicies.filterIt(it >= manifest.blocksCount):
+  for idx in indices.filterIt(it >= manifest.blocksCount):
     let pos = indexToPos(params.steps, idx, step)
     trace "Padding with empty block", idx
     shallowCopy(data[pos], emptyBlock)

@@ -218,8 +219,8 @@ proc prepareDecodingData(
     strategy = encoded.protectedStrategy.init(
       firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
     )
-    indicies = toSeq(strategy.getIndicies(step))
-    pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
+    indices = toSeq(strategy.getIndices(step))
+    pendingBlocksIter = self.getPendingBlocks(encoded, indices)

   var
     dataPieces = 0

@@ -233,7 +234,7 @@ proc prepareDecodingData(

     let (blkOrErr, idx) = await fut
     without blk =? blkOrErr, err:
-      trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
+      trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
       continue

     let pos = indexToPos(encoded.steps, idx, step)

@@ -310,10 +311,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
   else:
     task[].success.store(true)

-proc encodeAsync*(
+proc asyncEncode*(
   self: Erasure,
   blockSize, blocksLen, parityLen: int,
-  data: ref seq[seq[byte]],
+  blocks: ref seq[seq[byte]],
   parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
 ): Future[?!void] {.async: (raises: [CancelledError]).} =
   without threadPtr =? ThreadSignalPtr.new():

@@ -322,47 +323,37 @@ proc encodeAsync*(
   defer:
     threadPtr.close().expect("closing once works")

-  var blockData = createDoubleArray(blocksLen, blockSize)
-
-  for i in 0 ..< data[].len:
-    copyMem(blockData[i], addr data[i][0], blockSize)
+  var data = makeUncheckedArray(blocks)

   defer:
-    freeDoubleArray(blockData, blocksLen)
+    dealloc(data)

   ## Create an ecode task with block data
   var task = EncodeTask(
     erasure: addr self,
     blockSize: blockSize,
     blocksLen: blocksLen,
     parityLen: parityLen,
-    blocks: blockData,
+    blocks: data,
     parity: parity,
     signal: threadPtr,
   )

-  let t = addr task
-
   doAssert self.taskPool.numThreads > 1,
     "Must have at least one separate thread or signal will never be fired"
-  self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
+  self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
   let threadFut = threadPtr.wait()

-  try:
-    await threadFut.join()
-  except CatchableError as exc:
-    try:
-      await threadFut
-    except AsyncError as asyncExc:
-      return failure(asyncExc.msg)
-    finally:
-      if exc of CancelledError:
-        raise (ref CancelledError) exc
-      else:
-        return failure(exc.msg)
+  if joinErr =? catch(await threadFut.join()).errorOption:
+    if err =? catch(await noCancel threadFut).errorOption:
+      return failure(err)
+    if joinErr of CancelledError:
+      raise (ref CancelledError) joinErr
+    else:
+      return failure(joinErr)

-  if not t.success.load():
-    return failure("Leopard encoding failed")
+  if not task.success.load():
+    return failure("Leopard encoding task failed")

   success()
@@ -392,6 +383,8 @@ proc encodeData(
   var
     data = seq[seq[byte]].new() # number of blocks to encode
     parity = createDoubleArray(params.ecM, manifest.blockSize.int)
+  defer:
+    freeDoubleArray(parity, params.ecM)

   data[].setLen(params.ecK)
   # TODO: this is a tight blocking loop so we sleep here to allow

@@ -409,15 +402,13 @@ proc encodeData(

       try:
         if err =? (
-          await self.encodeAsync(
+          await self.asyncEncode(
             manifest.blockSize.int, params.ecK, params.ecM, data, parity
           )
         ).errorOption:
           return failure(err)
       except CancelledError as exc:
         raise exc
-      finally:
-        freeDoubleArray(parity, params.ecM)

       var idx = params.rounded + step
       for j in 0 ..< params.ecM:

@@ -429,8 +420,8 @@ proc encodeData(

         trace "Adding parity block", cid = blk.cid, idx
         cids[idx] = blk.cid
-        if isErr (await self.store.putBlock(blk)):
-          trace "Unable to store block!", cid = blk.cid
+        if error =? (await self.store.putBlock(blk)).errorOption:
+          warn "Unable to store block!", cid = blk.cid, msg = error.msg
           return failure("Unable to store block!")
         idx.inc(params.steps)

@@ -489,6 +480,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
     task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
   defer:
     decoder.release()
+    discard task[].signal.fireSync()

   if (
     let res = decoder.decode(
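Moving `fireSync` into the `defer` block guarantees the waiting thread is signalled on every exit path of the task, early returns and error branches included. A tiny standalone illustration of the defer property this relies on:

```nim
# A defer block runs when the enclosing proc exits, on every path,
# which is why the signal can never be skipped once it lives in `defer`.
proc work(fail: bool): string =
  defer:
    echo "signal fired"
  if fail:
    return "early exit"
  return "normal exit"

echo work(true)   # "signal fired" prints before the returned string
echo work(false)
```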
@@ -506,9 +498,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
   else:
     task[].success.store(true)

-  discard task[].signal.fireSync()
-
-proc decodeAsync*(
+proc asyncDecode*(
   self: Erasure,
   blockSize, blocksLen, parityLen: int,
   blocks, parity: ref seq[seq[byte]],

@@ -521,70 +511,47 @@ proc decodeAsync*(
     threadPtr.close().expect("closing once works")

   var
-    blocksData = createDoubleArray(blocksLen, blockSize)
-    parityData = createDoubleArray(parityLen, blockSize)
-
-  for i in 0 ..< blocks[].len:
-    if blocks[i].len > 0:
-      copyMem(blocksData[i], addr blocks[i][0], blockSize)
-    else:
-      blocksData[i] = nil
-
-  for i in 0 ..< parity[].len:
-    if parity[i].len > 0:
-      copyMem(parityData[i], addr parity[i][0], blockSize)
-    else:
-      parityData[i] = nil
+    blockData = makeUncheckedArray(blocks)
+    parityData = makeUncheckedArray(parity)

   defer:
-    freeDoubleArray(blocksData, blocksLen)
-    freeDoubleArray(parityData, parityLen)
+    dealloc(blockData)
+    dealloc(parityData)

   ## Create an decode task with block data
   var task = DecodeTask(
     erasure: addr self,
     blockSize: blockSize,
     blocksLen: blocksLen,
     parityLen: parityLen,
     recoveredLen: blocksLen,
-    blocks: blocksData,
+    blocks: blockData,
     parity: parityData,
     recovered: recovered,
     signal: threadPtr,
   )

-  # Hold the task pointer until the signal is received
-  let t = addr task
   doAssert self.taskPool.numThreads > 1,
     "Must have at least one separate thread or signal will never be fired"
-  self.taskPool.spawn leopardDecodeTask(self.taskPool, t)
+  self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
   let threadFut = threadPtr.wait()

-  try:
-    await threadFut.join()
-  except CatchableError as exc:
-    try:
-      await threadFut
-    except AsyncError as asyncExc:
-      return failure(asyncExc.msg)
-    finally:
-      if exc of CancelledError:
-        raise (ref CancelledError) exc
-      else:
-        return failure(exc.msg)
+  if joinErr =? catch(await threadFut.join()).errorOption:
+    if err =? catch(await noCancel threadFut).errorOption:
+      return failure(err)
+    if joinErr of CancelledError:
+      raise (ref CancelledError) joinErr
+    else:
+      return failure(joinErr)

-  if not t.success.load():
-    return failure("Leopard encoding failed")
+  if not task.success.load():
+    return failure("Leopard decoding task failed")

   success()

-proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
-  ## Decode a protected manifest into it's original
-  ## manifest
-  ##
-  ## `encoded` - the encoded (protected) manifest to
-  ## be recovered
-  ##
+proc decodeInternal(
+    self: Erasure, encoded: Manifest
+): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
   logScope:
     steps = encoded.steps
     rounded_blocks = encoded.rounded

@@ -608,6 +575,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
     data = seq[seq[byte]].new()
     parityData = seq[seq[byte]].new()
     recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
+  defer:
+    freeDoubleArray(recovered, encoded.ecK)

   data[].setLen(encoded.ecK) # set len to K
   parityData[].setLen(encoded.ecM) # set len to M

@@ -627,15 +596,13 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
       trace "Erasure decoding data"
       try:
         if err =? (
-          await self.decodeAsync(
+          await self.asyncDecode(
             encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
           )
         ).errorOption:
           return failure(err)
       except CancelledError as exc:
         raise exc
-      finally:
-        freeDoubleArray(recovered, encoded.ecK)

       for i in 0 ..< encoded.ecK:
         let idx = i * encoded.steps + step

@@ -649,10 +616,12 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
           return failure(error)

         trace "Recovered block", cid = blk.cid, index = i
-        if isErr (await self.store.putBlock(blk)):
-          trace "Unable to store block!", cid = blk.cid
+        if error =? (await self.store.putBlock(blk)).errorOption:
+          warn "Unable to store block!", cid = blk.cid, msg = error.msg
           return failure("Unable to store block!")

+        self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)
+
         cids[idx] = blk.cid
         recoveredIndices.add(idx)
   except CancelledError as exc:

@@ -664,6 +633,19 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
   finally:
     decoder.release()

+  return (cids, recoveredIndices).success
+
+proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
+  ## Decode a protected manifest into it's original
+  ## manifest
+  ##
+  ## `encoded` - the encoded (protected) manifest to
+  ## be recovered
+  ##
+  without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
+    return failure(err)
+
   without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
     return failure(err)

@@ -685,6 +667,44 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =

   return decoded.success

+proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
+  ## Repair a protected manifest by reconstructing the full dataset
+  ##
+  ## `encoded` - the encoded (protected) manifest to
+  ## be repaired
+  ##
+  without (cids, _) =? (await self.decodeInternal(encoded)), err:
+    return failure(err)
+
+  without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
+    return failure(err)
+
+  without treeCid =? tree.rootCid, err:
+    return failure(err)
+
+  if treeCid != encoded.originalTreeCid:
+    return failure(
+      "Original tree root differs from the tree root computed out of recovered data"
+    )
+
+  if err =? (await self.store.putAllProofs(tree)).errorOption:
+    return failure(err)
+
+  without repaired =? (
+    await self.encode(
+      Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
+    )
+  ), err:
+    return failure(err)
+
+  if repaired.treeCid != encoded.treeCid:
+    return failure(
+      "Original tree root differs from the repaired tree root encoded out of recovered data"
+    )
+
+  return success()
+
 proc start*(self: Erasure) {.async.} =
   return
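The new `repair` path's safety check is worth spelling out: after reconstruction it recomputes the content root and refuses to accept recovered data whose root differs from the original manifest's. A standalone model of that invariant, using a plain hash as a toy substitute for the Merkle tree root:

```nim
import std/hashes

# Stand-in for the root comparison in `repair`: recovered content is only
# accepted when its recomputed root equals the one recorded in the original
# manifest. `hash` is a toy substitute for the real tree root.
let originalRoot = hash(@[byte 1, 2, 3, 4])

proc verifyRecovered(recovered: seq[byte], expectedRoot: Hash): bool =
  hash(recovered) == expectedRoot

doAssert verifyRecovered(@[byte 1, 2, 3, 4], originalRoot)
doAssert not verifyRecovered(@[byte 9, 9, 9, 9], originalRoot)
echo "root check models the repair invariant"
```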
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,7 +7,11 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 
+{.push raises: [].}
+
 import std/options
+import std/sugar
+import std/sequtils
 
 import pkg/results
 import pkg/chronos
@@ -19,6 +23,8 @@ type
   CodexError* = object of CatchableError # base codex error
   CodexResult*[T] = Result[T, ref CodexError]
 
+  FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
+
 template mapFailure*[T, V, E](
     exp: Result[T, V], exc: typedesc[E]
 ): Result[T, ref CatchableError] =
@@ -40,35 +46,43 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
   else:
     T.failure("Option is None")
 
-# allFuturesThrowing was moved to the tests in libp2p
-proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
-  var futs: seq[Future[T]]
-  for fut in args:
-    futs &= fut
-  proc call() {.async.} =
-    var first: ref CatchableError = nil
-    futs = await allFinished(futs)
-    for fut in futs:
-      if fut.failed:
-        let err = fut.readError()
-        if err of Defect:
-          raise err
-        else:
-          if err of CancelledError:
-            raise err
-          if isNil(first):
-            first = err
-    if not isNil(first):
-      raise first
-
-  return call()
-
-proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
-  try:
-    await allFuturesThrowing(fut)
-  except CancelledError as exc:
-    raise exc
-  except CatchableError as exc:
-    return failure(exc.msg)
-
-  return success()
+proc allFinishedFailed*[T](
+    futs: auto
+): Future[FinishedFailed[T]] {.async: (raises: [CancelledError]).} =
+  ## Check if all futures have finished or failed
+  ##
+  ## TODO: wip, not sure if we want this - at the minimum,
+  ## we should probably avoid the async transform
+
+  var res: FinishedFailed[T] = (@[], @[])
+  await allFutures(futs)
+  for f in futs:
+    if f.failed:
+      res.failure.add f
+    else:
+      res.success.add f
+
+  return res
+
+proc allFinishedValues*[T](
+    futs: auto
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
+  ## If all futures have finished, return corresponding values,
+  ## otherwise return failure
+  ##
+
+  # wait for all futures to be either completed, failed or canceled
+  await allFutures(futs)
+
+  let numOfFailed = futs.countIt(it.failed)
+
+  if numOfFailed > 0:
+    return failure "Some futures failed (" & $numOfFailed & ")"
+
+  # here, we know there are no failed futures in "futs"
+  # and we are only interested in those that completed successfully
+  let values = collect:
+    for b in futs:
+      if b.finished:
+        b.value
+  return success values
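Example (illustrative): how the two new helpers partition or collect futures. `someWork` is a hypothetical `Future[int]` producer, and questionable's `without`/`=?` is assumed imported:

proc demo() {.async: (raises: [CancelledError]).} =
  var futs: seq[Future[int]]
  for i in 0 ..< 4:
    futs.add(someWork(i))

  # Partition into completed and failed futures without raising
  let res = await allFinishedFailed[int](futs)
  echo "ok: ", res.success.len, ", failed: ", res.failure.len

  # Or require that every future completed, collecting the values
  without values =? (await allFinishedValues[int](futs)), err:
    echo "some futures failed: ", err.msg
    return
  echo values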
@@ -24,13 +24,17 @@ type
   IndexingError* = object of CodexError
   IndexingWrongIndexError* = object of IndexingError
   IndexingWrongIterationsError* = object of IndexingError
+  IndexingWrongGroupCountError* = object of IndexingError
+  IndexingWrongPadBlockCountError* = object of IndexingError
 
   IndexingStrategy* = object
-    strategyType*: StrategyType
+    strategyType*: StrategyType # Indexing strategy algorithm
     firstIndex*: int # Lowest index that can be returned
     lastIndex*: int # Highest index that can be returned
-    iterations*: int # getIndices(iteration) will run from 0 ..< iterations
-    step*: int
+    iterations*: int # Number of iteration steps (0 ..< iterations)
+    step*: int # Step size between generated indices
+    groupCount*: int # Number of groups to partition indices into
+    padBlockCount*: int # Number of padding blocks to append per group
 
 func checkIteration(
     self: IndexingStrategy, iteration: int
@@ -44,39 +48,47 @@ func getIter(first, last, step: int): Iter[int] =
   {.cast(noSideEffect).}:
     Iter[int].new(first, last, step)
 
-func getLinearIndicies(
-    self: IndexingStrategy, iteration: int
-): Iter[int] {.raises: [IndexingError].} =
-  self.checkIteration(iteration)
-
+func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
   let
     first = self.firstIndex + iteration * self.step
     last = min(first + self.step - 1, self.lastIndex)
 
   getIter(first, last, 1)
 
-func getSteppedIndicies(
-    self: IndexingStrategy, iteration: int
-): Iter[int] {.raises: [IndexingError].} =
-  self.checkIteration(iteration)
-
+func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
   let
     first = self.firstIndex + iteration
     last = self.lastIndex
 
   getIter(first, last, self.iterations)
 
-func getIndicies*(
-    self: IndexingStrategy, iteration: int
-): Iter[int] {.raises: [IndexingError].} =
+func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
   case self.strategyType
   of StrategyType.LinearStrategy:
-    self.getLinearIndicies(iteration)
+    self.getLinearIndices(iteration)
   of StrategyType.SteppedStrategy:
-    self.getSteppedIndicies(iteration)
+    self.getSteppedIndices(iteration)
+
+func getIndices*(
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
+  self.checkIteration(iteration)
+  {.cast(noSideEffect).}:
+    Iter[int].new(
+      iterator (): int {.gcsafe.} =
+        for value in self.getStrategyIndices(iteration):
+          yield value
+
+        for i in 0 ..< self.padBlockCount:
+          yield self.lastIndex + (iteration + 1) + i * self.groupCount
+    )
 
 func init*(
-    strategy: StrategyType, firstIndex, lastIndex, iterations: int
+    strategy: StrategyType,
+    firstIndex, lastIndex, iterations: int,
+    groupCount = 0,
+    padBlockCount = 0,
 ): IndexingStrategy {.raises: [IndexingError].} =
   if firstIndex > lastIndex:
     raise newException(
@@ -91,10 +103,24 @@ func init*(
       "iterations (" & $iterations & ") must be greater than zero.",
     )
 
+  if padBlockCount < 0:
+    raise newException(
+      IndexingWrongPadBlockCountError,
+      "padBlockCount (" & $padBlockCount & ") must be equal or greater than zero.",
+    )
+
+  if padBlockCount > 0 and groupCount <= 0:
+    raise newException(
+      IndexingWrongGroupCountError,
+      "groupCount (" & $groupCount & ") must be greater than zero.",
+    )
+
   IndexingStrategy(
     strategyType: strategy,
     firstIndex: firstIndex,
     lastIndex: lastIndex,
    iterations: iterations,
     step: divUp((lastIndex - firstIndex + 1), iterations),
+    groupCount: groupCount,
+    padBlockCount: padBlockCount,
   )
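Worked example (illustrative) of the padding arithmetic in the new `getIndices`: with `lastIndex = 15`, `groupCount = 4` and `padBlockCount = 2`, iteration 0 of a stepped strategy yields its regular indices followed by `15 + 1 + 0*4 = 16` and `15 + 1 + 1*4 = 20`:

let strategy = StrategyType.SteppedStrategy.init(
  firstIndex = 0, lastIndex = 15, iterations = 4, groupCount = 4, padBlockCount = 2
)

for idx in strategy.getIndices(0):
  echo idx # 0, 4, 8, 12, then the padding indices 16, 20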
@@ -11,7 +11,7 @@
 ## 4. Remove usages of `nim-json-serialization` from the codebase
 ## 5. Remove need to declare `writeValue` for new types
 ## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
-##    conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
+##    conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
 ##
 ## When declaring a new type, one should consider importing the `codex/logutils`
 ## module, and specifying `formatIt`. If textlines log output and json log output
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,11 +9,9 @@
 
 # This module implements serialization and deserialization of Manifest
 
-import pkg/upraises
 import times
 
-push:
-  {.upraises: [].}
+{.push raises: [].}
 
 import std/tables
 import std/sequtils
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,10 +9,7 @@
 
 # This module defines all operations on Manifest
 
-import pkg/upraises
-
-push:
-  {.upraises: [].}
+{.push raises: [], gcsafe.}
 
 import pkg/libp2p/protobuf/minprotobuf
 import pkg/libp2p/[cid, multihash, multicodec]
codex/market.nim | 103

@@ -1,5 +1,4 @@
 import pkg/chronos
-import pkg/upraises
 import pkg/questionable
 import pkg/ethers/erc20
 import ./contracts/requests
@@ -18,17 +17,20 @@ export periods
 type
   Market* = ref object of RootObj
   MarketError* = object of CodexError
+  SlotStateMismatchError* = object of MarketError
+  SlotReservationNotAllowedError* = object of MarketError
+  ProofInvalidError* = object of MarketError
   Subscription* = ref object of RootObj
   OnRequest* =
-    proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
-  OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
-  OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
-  OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
+    proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
+  OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
+  OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
+  OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
   OnSlotReservationsFull* =
-    proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
-  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
-  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
-  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
+    proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
+  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
+  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
+  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
   ProofChallenge* = array[32, byte]
 
 # Marketplace events -- located here due to the Market abstraction
@@ -62,25 +64,42 @@ type
   ProofSubmitted* = object of MarketplaceEvent
     id*: SlotId
 
-method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
+method loadConfig*(
+    market: Market
+): Future[?!void] {.base, async: (raises: [CancelledError]).} =
+  raiseAssert("not implemented")
+
+method getZkeyHash*(
+    market: Market
+): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method getSigner*(market: Market): Future[Address] {.base, async.} =
+method getSigner*(
+    market: Market
+): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
+method periodicity*(
+    market: Market
+): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method proofTimeout*(market: Market): Future[uint64] {.base, async.} =
+method proofTimeout*(
+    market: Market
+): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} =
+method repairRewardPercentage*(
+    market: Market
+): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
   raiseAssert("not implemented")
 
-method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
+method proofDowntime*(
+    market: Market
+): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
@@ -91,7 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
   let pntr = await market.getPointer(slotId)
   return pntr < downtime
 
-method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
+method requestStorage*(
+    market: Market, request: StorageRequest
+) {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@@ -102,7 +123,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
 
 method getRequest*(
     market: Market, id: RequestId
-): Future[?StorageRequest] {.base, async.} =
+): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
   raiseAssert("not implemented")
 
 method requestState*(
@@ -110,7 +131,9 @@ method requestState*(
 ): Future[?RequestState] {.base, async.} =
   raiseAssert("not implemented")
 
-method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
+method slotState*(
+    market: Market, slotId: SlotId
+): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method getRequestEnd*(
@@ -125,12 +148,12 @@ method requestExpiresAt*(
 
 method getHost*(
     market: Market, requestId: RequestId, slotIndex: uint64
-): Future[?Address] {.base, async.} =
+): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method currentCollateral*(
     market: Market, slotId: SlotId
-): Future[UInt256] {.base, async.} =
+): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
   raiseAssert("not implemented")
 
 method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
@@ -142,13 +165,17 @@ method fillSlot*(
     slotIndex: uint64,
     proof: Groth16Proof,
     collateral: UInt256,
-) {.base, async.} =
+) {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
+method freeSlot*(
+    market: Market, slotId: SlotId
+) {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
+method withdrawFunds*(
+    market: Market, requestId: RequestId
+) {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method subscribeRequests*(
@@ -167,20 +194,24 @@ method getChallenge*(
 ): Future[ProofChallenge] {.base, async.} =
   raiseAssert("not implemented")
 
-method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} =
+method submitProof*(
+    market: Market, id: SlotId, proof: Groth16Proof
+) {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
-method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} =
-  raiseAssert("not implemented")
-
-method canProofBeMarkedAsMissing*(
+method markProofAsMissing*(
     market: Market, id: SlotId, period: Period
-): Future[bool] {.base, async.} =
+) {.base, async: (raises: [CancelledError, MarketError]).} =
+  raiseAssert("not implemented")
+
+method canMarkProofAsMissing*(
+    market: Market, id: SlotId, period: Period
+): Future[bool] {.base, async: (raises: [CancelledError]).} =
   raiseAssert("not implemented")
 
 method reserveSlot*(
     market: Market, requestId: RequestId, slotIndex: uint64
-) {.base, async.} =
+) {.base, async: (raises: [CancelledError, MarketError]).} =
   raiseAssert("not implemented")
 
 method canReserveSlot*(
@@ -243,7 +274,7 @@ method subscribeProofSubmission*(
 ): Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")
 
-method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
+method unsubscribe*(subscription: Subscription) {.base, async.} =
   raiseAssert("not implemented")
 
 method queryPastSlotFilledEvents*(
@@ -270,3 +301,13 @@ method queryPastStorageRequestedEvents*(
     market: Market, blocksAgo: int
 ): Future[seq[StorageRequested]] {.base, async.} =
   raiseAssert("not implemented")
+
+method slotCollateral*(
+    market: Market, requestId: RequestId, slotIndex: uint64
+): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
+  raiseAssert("not implemented")
+
+method slotCollateral*(
+    market: Market, collateralPerSlot: UInt256, slotState: SlotState
+): ?!UInt256 {.base, gcsafe, raises: [].} =
+  raiseAssert("not implemented")
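Example (illustrative): a concrete `Market` subtype now has to mirror the annotated raises list when overriding a base method. `MockMarket` here is hypothetical:

type MockMarket = ref object of Market
  timeout: uint64

method proofTimeout*(
    market: MockMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
  return market.timeout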
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,10 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 
-import pkg/upraises
-
-push:
-  {.upraises: [].}
+{.push raises: [], gcsafe.}
 
 import pkg/libp2p
 import pkg/questionable
@@ -27,11 +24,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
 const MaxMerkleProofSize = 1.MiBs.uint
 
 proc encode*(self: CodexTree): seq[byte] =
-  var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
+  var pb = initProtoBuffer()
   pb.write(1, self.mcodec.uint64)
   pb.write(2, self.leavesCount.uint64)
   for node in self.nodes:
-    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
+    var nodesPb = initProtoBuffer()
     nodesPb.write(1, node)
     nodesPb.finish()
     pb.write(3, nodesPb)
@@ -40,7 +37,7 @@ proc encode*(self: CodexTree): seq[byte] =
   pb.buffer
 
 proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
-  var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
+  var pb = initProtoBuffer(data)
   var mcodecCode: uint64
   var leavesCount: uint64
   discard ?pb.getField(1, mcodecCode).mapFailure
@@ -63,13 +60,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
   CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
 
 proc encode*(self: CodexProof): seq[byte] =
-  var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
+  var pb = initProtoBuffer()
   pb.write(1, self.mcodec.uint64)
   pb.write(2, self.index.uint64)
   pb.write(3, self.nleaves.uint64)
 
   for node in self.path:
-    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
+    var nodesPb = initProtoBuffer()
     nodesPb.write(1, node)
     nodesPb.finish()
     pb.write(4, nodesPb)
@@ -78,7 +75,7 @@ proc encode*(self: CodexProof): seq[byte] =
   pb.buffer
 
 proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
-  var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
+  var pb = initProtoBuffer(data)
   var mcodecCode: uint64
   var index: uint64
   var nleaves: uint64
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -15,7 +15,7 @@ import std/sequtils
 import pkg/questionable
 import pkg/questionable/results
 import pkg/libp2p/[cid, multicodec, multihash]
+import pkg/constantine/hashes
 import ../../utils
 import ../../rng
 import ../../errors
@@ -47,28 +47,6 @@ type
   CodexProof* = ref object of ByteProof
     mcodec*: MultiCodec
 
-# CodeHashes is not exported from libp2p
-# So we need to recreate it instead of
-proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
-  for item in HashesList:
-    result[item.mcodec] = item
-
-const CodeHashes = initMultiHashCodeTable()
-
-func mhash*(mcodec: MultiCodec): ?!MHash =
-  let mhash = CodeHashes.getOrDefault(mcodec)
-
-  if isNil(mhash.coder):
-    return failure "Invalid multihash codec"
-
-  success mhash
-
-func digestSize*(self: (CodexTree or CodexProof)): int =
-  ## Number of leaves
-  ##
-
-  self.mhash.size
-
 func getProof*(self: CodexTree, index: int): ?!CodexProof =
   var proof = CodexProof(mcodec: self.mcodec)
 
@@ -128,13 +106,12 @@ proc `$`*(self: CodexProof): string =
   "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
     $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
 
-func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
+func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
   ## Compress two hashes
   ##
-  var digest = newSeq[byte](mhash.size)
-  mhash.coder(@x & @y & @[key.byte], digest)
-
-  success digest
+  let input = @x & @y & @[key.byte]
+  let digest = ?MultiHash.digest(codec, input).mapFailure
+  success digest.digestBytes
 
 func init*(
     _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
@@ -143,12 +120,12 @@ func init*(
     return failure "Empty leaves"
 
   let
-    mhash = ?mcodec.mhash()
     compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
-      compress(x, y, key, mhash)
-    Zero: ByteHash = newSeq[byte](mhash.size)
+      compress(x, y, key, mcodec)
+    digestSize = ?mcodec.digestSize.mapFailure
+    Zero: ByteHash = newSeq[byte](digestSize)
 
-  if mhash.size != leaves[0].len:
+  if digestSize != leaves[0].len:
     return failure "Invalid hash length"
 
   var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
@@ -186,12 +163,12 @@ proc fromNodes*(
     return failure "Empty nodes"
 
   let
-    mhash = ?mcodec.mhash()
-    Zero = newSeq[byte](mhash.size)
+    digestSize = ?mcodec.digestSize.mapFailure
+    Zero = newSeq[byte](digestSize)
     compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
-      compress(x, y, key, mhash)
+      compress(x, y, key, mcodec)
 
-  if mhash.size != nodes[0].len:
+  if digestSize != nodes[0].len:
     return failure "Invalid hash length"
 
   var
@@ -224,10 +201,10 @@ func init*(
     return failure "Empty nodes"
 
   let
-    mhash = ?mcodec.mhash()
-    Zero = newSeq[byte](mhash.size)
+    digestSize = ?mcodec.digestSize.mapFailure
+    Zero = newSeq[byte](digestSize)
     compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
-      compress(x, y, key, mhash)
+      compress(x, y, key, mcodec)
 
   success CodexProof(
     compress: compressor,
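Example (illustrative): the codec-based `compress` now resolves the hash function through `MultiHash.digest` instead of a captured `MHash`. The `ByteTreeKey.KeyNone` value below is an assumption about the enum's members:

let
  left = newSeq[byte](32) # a 32-byte child node
  right = newSeq[byte](32)

without parent =? compress(left, right, ByteTreeKey.KeyNone, multiCodec("sha2-256")), err:
  echo "compress failed: ", err.msg
# parent holds the sha2-256 digest of left & right & the key byte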
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
codex/multicodec_exts.nim | 11 (new file)

@@ -0,0 +1,11 @@
+const CodecExts = [
+  ("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
+  ("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
+  ("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress
+  ("codex-manifest", 0xCD01),
+  ("codex-block", 0xCD02),
+  ("codex-root", 0xCD03),
+  ("codex-slot-root", 0xCD04),
+  ("codex-proving-root", 0xCD05),
+  ("codex-slot-cell", 0xCD06),
+]
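Example (illustrative): the table pairs human-readable codec names with numeric codes, so a simple scan resolves one into the other. The registration with libp2p's multicodec machinery happens elsewhere and is assumed here:

import std/strutils

for (name, code) in CodecExts:
  echo name, " -> 0x", toHex(code, 4) # e.g. codex-manifest -> 0xCD01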
codex/multihash_exts.nim | 40 (new file)

@@ -0,0 +1,40 @@
+import blscurve/bls_public_exports
+import pkg/constantine/hashes
+import poseidon2
+
+proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
+  # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
+  # See: https://github.com/logos-storage/logos-storage-nim/issues/1162
+  if len(output) > 0:
+    let digest = hashes.sha256.hash(data)
+    copyMem(addr output[0], addr digest[0], 32)
+
+proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
+  if len(output) > 0:
+    var digest = poseidon2.Sponge.digest(data).toBytes()
+    copyMem(addr output[0], addr digest[0], uint(len(output)))
+
+proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
+  if len(output) > 0:
+    var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
+    copyMem(addr output[0], addr digest[0], uint(len(output)))
+
+const Sha2256MultiHash* = MHash(
+  mcodec: multiCodec("sha2-256"),
+  size: sha256.sizeDigest,
+  coder: sha2_256hash_constantine,
+)
+const HashExts = [
+  # override sha2-256 hash function
+  Sha2256MultiHash,
+  MHash(
+    mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
+    size: 32,
+    coder: poseidon2_sponge_rate2,
+  ),
+  MHash(
+    mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
+    size: 32,
+    coder: poseidon2_merkle_2kb_sponge,
+  ),
+]
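Example (illustrative): each `MHash` bundles a codec, a digest size, and a coder proc that writes into a caller-provided buffer. Invoking one of the coders directly, as if from inside this module (they are private), might look like:

var output = newSeq[byte](32) # must match the declared size: 32
let data = @[1'u8, 2, 3, 4]
poseidon2_sponge_rate2(data, output)
# output now holds the 32-byte poseidon2 sponge digest of data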
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
codex/nat.nim | 173

@@ -9,11 +9,11 @@
 {.push raises: [].}
 
 import
-  std/[options, os, strutils, times, net],
-  stew/shims/net as stewNet,
-  stew/[objects, results],
+  std/[options, os, strutils, times, net, atomics],
+  stew/[objects],
   nat_traversal/[miniupnpc, natpmp],
-  json_serialization/std/net
+  json_serialization/std/net,
+  results
 
 import pkg/chronos
 import pkg/chronicles
@@ -28,14 +28,29 @@ const
   PORT_MAPPING_INTERVAL = 20 * 60 # seconds
   NATPMP_LIFETIME = 60 * 60 # in seconds, must be longer than PORT_MAPPING_INTERVAL
 
-var
-  upnp {.threadvar.}: Miniupnp
-  npmp {.threadvar.}: NatPmp
-  strategy = NatStrategy.NatNone
+type PortMappings* = object
   internalTcpPort: Port
   externalTcpPort: Port
   internalUdpPort: Port
   externalUdpPort: Port
+  description: string
+
+type PortMappingArgs =
+  tuple[strategy: NatStrategy, tcpPort, udpPort: Port, description: string]
+
+type NatConfig* = object
+  case hasExtIp*: bool
+  of true: extIp*: IpAddress
+  of false: nat*: NatStrategy
+
+var
+  upnp {.threadvar.}: Miniupnp
+  npmp {.threadvar.}: NatPmp
+  strategy = NatStrategy.NatNone
+  natClosed: Atomic[bool]
+  extIp: Option[IpAddress]
+  activeMappings: seq[PortMappings]
+  natThreads: seq[Thread[PortMappingArgs]] = @[]
 
 logScope:
   topics = "nat"
@@ -107,7 +122,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
   else:
     try:
       externalIP = parseIpAddress($(nires.value))
-      strategy = NatPmp
+      strategy = NatStrategy.NatPmp
       return some(externalIP)
     except ValueError as e:
      error "parseIpAddress() exception", err = e.msg
@@ -153,7 +168,7 @@ proc getPublicRoutePrefSrcOrExternalIP*(
     return some(extIp.get)
 
 proc doPortMapping(
-    tcpPort, udpPort: Port, description: string
+    strategy: NatStrategy, tcpPort, udpPort: Port, description: string
 ): Option[(Port, Port)] {.gcsafe.} =
   var
     extTcpPort: Port
@@ -213,15 +228,10 @@ proc doPortMapping(
       extUdpPort = extPort
   return some((extTcpPort, extUdpPort))
 
-type PortMappingArgs = tuple[tcpPort, udpPort: Port, description: string]
-var
-  natThread: Thread[PortMappingArgs]
-  natCloseChan: Channel[bool]
-
 proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
   ignoreSignalsInThread()
   let
-    (tcpPort, udpPort, description) = args
+    (strategy, tcpPort, udpPort, description) = args
     interval = initDuration(seconds = PORT_MAPPING_INTERVAL)
     sleepDuration = 1_000 # in ms, also the maximum delay after pressing Ctrl-C
 
@@ -233,30 +243,23 @@ proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
   # even though we don't need the external IP's value.
   let ipres = getExternalIP(strategy, quiet = true)
   if ipres.isSome:
-    while true:
-      # we're being silly here with this channel polling because we can't
-      # select on Nim channels like on Go ones
-      let (dataAvailable, _) =
-        try:
-          natCloseChan.tryRecv()
-        except Exception:
-          (false, false)
-      if dataAvailable:
-        return
-      else:
-        let currTime = now()
-        if currTime >= (lastUpdate + interval):
-          discard doPortMapping(tcpPort, udpPort, description)
-          lastUpdate = currTime
+    while natClosed.load() == false:
+      let
+        # we're being silly here with this channel polling because we can't
+        # select on Nim channels like on Go ones
+        currTime = now()
+      if currTime >= (lastUpdate + interval):
+        discard doPortMapping(strategy, tcpPort, udpPort, description)
+        lastUpdate = currTime
       sleep(sleepDuration)
 
-proc stopNatThread() {.noconv.} =
+proc stopNatThreads() {.noconv.} =
   # stop the thread
+  debug "Stopping NAT port mapping renewal threads"
   try:
-    natCloseChan.send(true)
-    natThread.joinThread()
-    natCloseChan.close()
+    natClosed.store(true)
+    joinThreads(natThreads)
   except Exception as exc:
     warn "Failed to stop NAT port mapping renewal thread", exc = exc.msg
 
@@ -268,54 +271,68 @@ proc stopNatThread() {.noconv.} =
 
   # In Windows, a new thread is created for the signal handler, so we need to
   # initialise our threadvars again.
 
   let ipres = getExternalIP(strategy, quiet = true)
   if ipres.isSome:
     if strategy == NatStrategy.NatUpnp:
-      for t in [
-        (externalTcpPort, internalTcpPort, UPNPProtocol.TCP),
-        (externalUdpPort, internalUdpPort, UPNPProtocol.UDP),
-      ]:
-        let
-          (eport, iport, protocol) = t
-          pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol)
-        if pmres.isErr:
-          error "UPnP port mapping deletion", msg = pmres.error
-        else:
-          debug "UPnP: deleted port mapping",
-            externalPort = eport, internalPort = iport, protocol = protocol
+      for entry in activeMappings:
+        for t in [
+          (entry.externalTcpPort, entry.internalTcpPort, UPNPProtocol.TCP),
+          (entry.externalUdpPort, entry.internalUdpPort, UPNPProtocol.UDP),
+        ]:
+          let
+            (eport, iport, protocol) = t
+            pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol)
+          if pmres.isErr:
+            error "UPnP port mapping deletion", msg = pmres.error
+          else:
+            debug "UPnP: deleted port mapping",
+              externalPort = eport, internalPort = iport, protocol = protocol
     elif strategy == NatStrategy.NatPmp:
-      for t in [
-        (externalTcpPort, internalTcpPort, NatPmpProtocol.TCP),
-        (externalUdpPort, internalUdpPort, NatPmpProtocol.UDP),
-      ]:
-        let
-          (eport, iport, protocol) = t
-          pmres = npmp.deletePortMapping(
-            eport = eport.cushort, iport = iport.cushort, protocol = protocol
-          )
-        if pmres.isErr:
-          error "NAT-PMP port mapping deletion", msg = pmres.error
-        else:
-          debug "NAT-PMP: deleted port mapping",
-            externalPort = eport, internalPort = iport, protocol = protocol
+      for entry in activeMappings:
+        for t in [
+          (entry.externalTcpPort, entry.internalTcpPort, NatPmpProtocol.TCP),
+          (entry.externalUdpPort, entry.internalUdpPort, NatPmpProtocol.UDP),
+        ]:
+          let
+            (eport, iport, protocol) = t
+            pmres = npmp.deletePortMapping(
+              eport = eport.cushort, iport = iport.cushort, protocol = protocol
+            )
+          if pmres.isErr:
+            error "NAT-PMP port mapping deletion", msg = pmres.error
+          else:
+            debug "NAT-PMP: deleted port mapping",
+              externalPort = eport, internalPort = iport, protocol = protocol
 
-proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] =
-  result = doPortMapping(tcpPort, udpPort, description)
+proc redirectPorts*(
+    strategy: NatStrategy, tcpPort, udpPort: Port, description: string
+): Option[(Port, Port)] =
+  result = doPortMapping(strategy, tcpPort, udpPort, description)
   if result.isSome:
-    (externalTcpPort, externalUdpPort) = result.get()
+    let (externalTcpPort, externalUdpPort) = result.get()
     # needed by NAT-PMP on port mapping deletion
-    internalTcpPort = tcpPort
-    internalUdpPort = udpPort
     # Port mapping works. Let's launch a thread that repeats it, in case the
     # NAT-PMP lease expires or the router is rebooted and forgets all about
     # these mappings.
-    natCloseChan.open()
+    activeMappings.add(
+      PortMappings(
+        internalTcpPort: tcpPort,
+        externalTcpPort: externalTcpPort,
+        internalUdpPort: udpPort,
+        externalUdpPort: externalUdpPort,
+        description: description,
+      )
+    )
    try:
-      natThread.createThread(
-        repeatPortMapping, (externalTcpPort, externalUdpPort, description)
+      natThreads.add(Thread[PortMappingArgs]())
+      natThreads[^1].createThread(
+        repeatPortMapping, (strategy, externalTcpPort, externalUdpPort, description)
       )
       # atexit() in disguise
-      addQuitProc(stopNatThread)
+      if natThreads.len == 1:
+        # we should register the thread termination function only once
+        addQuitProc(stopNatThreads)
    except Exception as exc:
      warn "Failed to create NAT port mapping renewal thread", exc = exc.msg
 
@@ -326,12 +343,15 @@ proc setupNat*(
   ## If any of this fails, we don't return any IP address but do return the
   ## original ports as best effort.
   ## TODO: Allow for tcp or udp port mapping to be optional.
-  let extIp = getExternalIP(natStrategy)
+  if extIp.isNone:
+    extIp = getExternalIP(natStrategy)
   if extIp.isSome:
     let ip = extIp.get
     let extPorts = (
       {.gcsafe.}:
-        redirectPorts(tcpPort = tcpPort, udpPort = udpPort, description = clientId)
+        redirectPorts(
+          strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId
+        )
     )
     if extPorts.isSome:
       let (extTcpPort, extUdpPort) = extPorts.get()
@@ -343,11 +363,6 @@ proc setupNat*(
     warn "UPnP/NAT-PMP not available"
     (ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
 
-type NatConfig* = object
-  case hasExtIp*: bool
-  of true: extIp*: IpAddress
-  of false: nat*: NatStrategy
-
 proc setupAddress*(
     natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string
 ): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} =
@@ -389,7 +404,7 @@ proc nattedAddress*(
     natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port
 ): tuple[libp2p, discovery: seq[MultiAddress]] =
   ## Takes a NAT configuration, sequence of multiaddresses and UDP port and returns:
   ## - Modified multiaddresses with NAT-mapped addresses for libp2p
   ## - Discovery addresses with NAT-mapped UDP ports
 
   var discoveryAddrs = newSeq[MultiAddress](0)
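Example (illustrative): constructing the relocated `NatConfig` variant object and handing it to `setupAddress`. The ports and client id below are placeholders:

let config = NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)

let (ip, tcpPort, udpPort) = setupAddress(
  config,
  bindIp = parseIpAddress("0.0.0.0"), # may raise ValueError on bad input
  tcpPort = Port(8070),
  udpPort = Port(8090),
  clientId = "logos-storage",
)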
codex/node.nim | 273

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -44,7 +44,7 @@ import ./indexingstrategy
 import ./utils
 import ./errors
 import ./logutils
-import ./utils/asynciter
+import ./utils/safeasynciter
 import ./utils/trackedfutures
 
 export logutils
@@ -52,7 +52,10 @@ export logutils
 logScope:
   topics = "codex node"
 
-const DefaultFetchBatch = 10
+const
+  DefaultFetchBatch = 1024
+  MaxOnBatchBlocks = 128
+  BatchRefillThreshold = 0.75 # Refill when 75% of window completes
 
 type
   Contracts* =
@@ -78,7 +81,9 @@ type
   CodexNodeRef* = ref CodexNode
 
   OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
-  BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
+  BatchProc* =
+    proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
+  OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
 
 func switch*(self: CodexNodeRef): Switch =
   return self.switch
@@ -109,7 +114,9 @@ proc storeManifest*(
 
   success blk
 
-proc fetchManifest*(self: CodexNodeRef, cid: Cid): Future[?!Manifest] {.async.} =
+proc fetchManifest*(
+    self: CodexNodeRef, cid: Cid
+): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
   ## Fetch and decode a manifest block
   ##
 
@@ -144,7 +151,7 @@ proc connect*(
 
 proc updateExpiry*(
     self: CodexNodeRef, manifestCid: Cid, expiry: SecondsSince1970
-): Future[?!void] {.async.} =
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   without manifest =? await self.fetchManifest(manifestCid), error:
     trace "Unable to fetch manifest for cid", manifestCid
     return failure(error)
@@ -153,7 +160,11 @@ proc updateExpiry*(
     let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
       self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
    )
-    await allFuturesThrowing(ensuringFutures)
+
+    let res = await allFinishedFailed[?!void](ensuringFutures)
+    if res.failure.len > 0:
+      trace "Some blocks failed to update expiry", len = res.failure.len
+      return failure("Some blocks failed to update expiry (" & $res.failure.len & ")")
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
@@ -168,7 +179,7 @@ proc fetchBatched*(
     batchSize = DefaultFetchBatch,
     onBatch: BatchProc = nil,
     fetchLocal = true,
-): Future[?!void] {.async, gcsafe.} =
+): Future[?!void] {.async: (raises: [CancelledError]), gcsafe.} =
   ## Fetch blocks in batches of `batchSize`
   ##
 
@@ -178,23 +189,62 @@ proc fetchBatched*(
   #   (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
   # )
 
-  while not iter.finished:
-    let blocks = collect:
-      for i in 0 ..< batchSize:
+  # Sliding window: maintain batchSize blocks in-flight
+  let
+    refillThreshold = int(float(batchSize) * BatchRefillThreshold)
+    refillSize = max(refillThreshold, 1)
+    maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
+
+  var
+    blockData: seq[bt.Block]
+    failedBlocks = 0
+    successfulBlocks = 0
+    completedInWindow = 0
+
+  var addresses = newSeqOfCap[BlockAddress](batchSize)
+  for i in 0 ..< batchSize:
+    if not iter.finished:
+      let address = BlockAddress.init(cid, iter.next())
+      if fetchLocal or not (await address in self.networkStore):
+        addresses.add(address)
+
+  var blockResults = await self.networkStore.getBlocks(addresses)
+
+  while not blockResults.finished:
+    without blk =? await blockResults.next(), err:
+      inc(failedBlocks)
+      continue
+
+    inc(successfulBlocks)
+    inc(completedInWindow)
+
+    if not onBatch.isNil:
+      blockData.add(blk)
+      if blockData.len >= maxCallbackBlocks:
+        if batchErr =? (await onBatch(blockData)).errorOption:
+          return failure(batchErr)
+        blockData = @[]
+
+    if completedInWindow >= refillThreshold and not iter.finished:
+      var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
+      for i in 0 ..< refillSize:
         if not iter.finished:
           let address = BlockAddress.init(cid, iter.next())
-          if not (await address in self.networkStore) or fetchLocal:
-            self.networkStore.getBlock(address)
+          if fetchLocal or not (await address in self.networkStore):
+            refillAddresses.add(address)
 
-    if blocksErr =? (await allFutureResult(blocks)).errorOption:
-      return failure(blocksErr)
+      if refillAddresses.len > 0:
+        blockResults =
+          chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
+        completedInWindow = 0
 
-    if not onBatch.isNil and
-        batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
+  if failedBlocks > 0:
+    return failure("Some blocks failed to fetch (" & $failedBlocks & ")")
+
+  if not onBatch.isNil and blockData.len > 0:
+    if batchErr =? (await onBatch(blockData)).errorOption:
       return failure(batchErr)
 
-    await sleepAsync(1.millis)
-
   success()
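Worked numbers (illustrative) for the sliding window above, using the new defaults:

let
  batchSize = 1024 # DefaultFetchBatch
  refillThreshold = int(float(batchSize) * 0.75) # 768
  maxCallbackBlocks = min(batchSize, 128) # MaxOnBatchBlocks -> 128

# After 768 of the in-flight requests complete, another 768 addresses are
# chained onto the result iterator, keeping roughly batchSize requests
# outstanding; onBatch fires at most once per 128 completed blocks.
echo refillThreshold, " ", maxCallbackBlocks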
@@ -203,7 +253,7 @@ proc fetchBatched*(
     batchSize = DefaultFetchBatch,
     onBatch: BatchProc = nil,
     fetchLocal = true,
-): Future[?!void] =
+): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
   ## Fetch manifest in batches of `batchSize`
   ##
 
@@ -213,7 +263,31 @@ proc fetchBatched*(
   let iter = Iter[int].new(0 ..< manifest.blocksCount)
   self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
 
-proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
+proc fetchDatasetAsync*(
+    self: CodexNodeRef, manifest: Manifest, fetchLocal = true
+): Future[void] {.async: (raises: []).} =
+  ## Asynchronously fetch a dataset in the background.
+  ## This task will be tracked and cleaned up on node shutdown.
+  ##
+  try:
+    if err =? (
+      await self.fetchBatched(
+        manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
+      )
+    ).errorOption:
+      error "Unable to fetch blocks", err = err.msg
+  except CancelledError as exc:
+    trace "Cancelled fetching blocks", exc = exc.msg
+
+proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
+  ## Start fetching a dataset in the background.
+  ## The task will be tracked and cleaned up on node shutdown.
+  ##
+  self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
|
proc streamSingleBlock(
|
||||||
|
self: CodexNodeRef, cid: Cid
|
||||||
|
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
|
||||||
## Streams the contents of a single block.
|
## Streams the contents of a single block.
|
||||||
##
|
##
|
||||||
trace "Streaming single block", cid = cid
|
trace "Streaming single block", cid = cid
|
||||||
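Note: `fetchDatasetAsync` is declared with `raises: []` precisely so it can run unsupervised. A minimal chronos sketch of the same fire-and-forget shape, with `asyncSpawn` standing in for this codebase's `TrackedFutures` registry (the pragma syntax assumes chronos v4):

    import pkg/chronos

    proc background(msg: string) {.async: (raises: []).} =
      # Nobody awaits this future, so it must handle cancellation itself.
      try:
        await sleepAsync(10.millis)
        echo msg
      except CancelledError:
        discard

    when isMainModule:
      asyncSpawn background("dataset fetched")
      waitFor sleepAsync(50.millis)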
@@ -223,36 +297,31 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async
   without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
     return failure(err)

-  proc streamOneBlock(): Future[void] {.async.} =
+  proc streamOneBlock(): Future[void] {.async: (raises: []).} =
     try:
+      defer:
+        await stream.pushEof()
       await stream.pushData(blk.data)
-    except CatchableError as exc:
+    except CancelledError as exc:
+      trace "Streaming block cancelled", cid, exc = exc.msg
+    except LPStreamError as exc:
       trace "Unable to send block", cid, exc = exc.msg
-      discard
-    finally:
-      await stream.pushEof()

   self.trackedFutures.track(streamOneBlock())
   LPStream(stream).success

 proc streamEntireDataset(
-    self: CodexNodeRef,
-    manifest: Manifest,
-    manifestCid: Cid,
-    prefetchBatch = DefaultFetchBatch,
-): Future[?!LPStream] {.async.} =
+    self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
+): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
   ## Streams the contents of the entire dataset described by the manifest.
-  ## Background jobs (erasure decoding and prefetching) will be cancelled when
-  ## the stream is closed.
   ##
   trace "Retrieving blocks from manifest", manifestCid

-  let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
   var jobs: seq[Future[void]]
+  let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
   if manifest.protected:
     # Retrieve, decode and save to the local store all EС groups
-    proc erasureJob(): Future[void] {.async.} =
+    proc erasureJob(): Future[void] {.async: (raises: []).} =
       try:
         # Spawn an erasure decoding job
         let erasure = Erasure.new(
@@ -260,40 +329,32 @@ proc streamEntireDataset(
         )
         without _ =? (await erasure.decode(manifest)), error:
           error "Unable to erasure decode manifest", manifestCid, exc = error.msg
-      except CancelledError:
-        trace "Erasure job cancelled", manifestCid
       except CatchableError as exc:
         trace "Error erasure decoding manifest", manifestCid, exc = exc.msg

     jobs.add(erasureJob())

-  proc prefetch(): Future[void] {.async.} =
-    try:
-      if err =?
-          (await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption:
-        error "Unable to fetch blocks", err = err.msg
-    except CancelledError:
-      trace "Prefetch job cancelled"
-    except CatchableError as exc:
-      error "Error fetching blocks", exc = exc.msg
-
-  jobs.add(prefetch())
+  jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))

   # Monitor stream completion and cancel background jobs when done
-  proc monitorStream() {.async.} =
+  proc monitorStream() {.async: (raises: []).} =
     try:
       await stream.join()
+    except CancelledError as exc:
+      warn "Stream cancelled", exc = exc.msg
     finally:
-      await allFutures(jobs.mapIt(it.cancelAndWait))
+      await noCancel allFutures(jobs.mapIt(it.cancelAndWait))

   self.trackedFutures.track(monitorStream())

+  # Retrieve all blocks of the dataset sequentially from the local store or network
   trace "Creating store stream for manifest", manifestCid

   stream.success

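Note: `noCancel` shields the `finally` cleanup in `monitorStream`, so background jobs are always cancelled and awaited even when the monitor itself is being torn down. The idiom in a minimal chronos sketch (assuming chronos v4, where `noCancel` is available):

    import pkg/chronos

    proc cleanup() {.async.} =
      await sleepAsync(5.millis)

    proc guarded() {.async.} =
      try:
        await sleepAsync(10.millis)
      finally:
        # An in-flight cancellation cannot interrupt this await.
        await noCancel cleanup()

    when isMainModule:
      waitFor guarded()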
 proc retrieve*(
     self: CodexNodeRef, cid: Cid, local: bool = true
-): Future[?!LPStream] {.async.} =
+): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
   ## Retrieve by Cid a single block or an entire dataset described by manifest
   ##

@@ -373,6 +434,7 @@ proc store*(
     filename: ?string = string.none,
     mimetype: ?string = string.none,
     blockSize = DefaultBlockSize,
+    onBlockStored: OnBlockStoredProc = nil,
 ): Future[?!Cid] {.async.} =
   ## Save stream contents as dataset with given blockSize
   ## to nodes's BlockStore, and return Cid of its manifest
@@ -402,6 +464,9 @@ proc store*(
       if err =? (await self.networkStore.putBlock(blk)).errorOption:
         error "Unable to store block", cid = blk.cid, err = err.msg
         return failure(&"Unable to store block {blk.cid}")
+
+      if not onBlockStored.isNil:
+        onBlockStored(chunk)
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
@@ -449,11 +514,11 @@ proc store*(
   return manifestBlk.cid.success

 proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
-  without cids =? await self.networkStore.listBlocks(BlockType.Manifest):
+  without cidsIter =? await self.networkStore.listBlocks(BlockType.Manifest):
     warn "Failed to listBlocks"
     return

-  for c in cids:
+  for c in cidsIter:
     if cid =? await c:
       without blk =? await self.networkStore.getBlock(cid):
         warn "Failed to get manifest block by cid", cid
@@ -591,8 +656,13 @@ proc requestStorage*(
   success purchase.id

 proc onStore(
-    self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb
-): Future[?!void] {.async.} =
+    self: CodexNodeRef,
+    request: StorageRequest,
+    expiry: SecondsSince1970,
+    slotIdx: uint64,
+    blocksCb: BlocksCb,
+    isRepairing: bool = false,
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   ## store data in local storage
   ##

@@ -613,19 +683,22 @@ proc onStore(
     trace "Unable to create slots builder", err = err.msg
     return failure(err)

-  let expiry = request.expiry
-
   if slotIdx > manifest.slotRoots.high.uint64:
     trace "Slot index not in manifest", slotIdx
     return failure(newException(CodexError, "Slot index not in manifest"))

-  proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} =
+  proc updateExpiry(
+      blocks: seq[bt.Block]
+  ): Future[?!void] {.async: (raises: [CancelledError]).} =
     trace "Updating expiry for blocks", blocks = blocks.len

     let ensureExpiryFutures =
-      blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))
-    if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
-      return failure(updateExpiryErr)
+      blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
+
+    let res = await allFinishedFailed[?!void](ensureExpiryFutures)
+    if res.failure.len > 0:
+      trace "Some blocks failed to update expiry", len = res.failure.len
+      return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")

     if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
       trace "Unable to process blocks", err = err.msg
@@ -633,32 +706,45 @@ proc onStore(

     return success()

-  without indexer =?
-    manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
-  err:
-    trace "Unable to create indexing strategy from protected manifest", err = err.msg
-    return failure(err)
-
   if slotIdx > int.high.uint64:
     error "Cannot cast slot index to int", slotIndex = slotIdx
     return

-  without blksIter =? indexer.getIndicies(slotIdx.int).catch, err:
-    trace "Unable to get indicies from strategy", err = err.msg
-    return failure(err)
+  if isRepairing:
+    trace "start repairing slot", slotIdx
+    try:
+      let erasure = Erasure.new(
+        self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
+      )
+      if err =? (await erasure.repair(manifest)).errorOption:
+        error "Unable to erasure decode repairing manifest",
+          cid = manifest.treeCid, exc = err.msg
+        return failure(err)
+    except CatchableError as exc:
+      error "Error erasure decoding repairing manifest",
+        cid = manifest.treeCid, exc = exc.msg
+      return failure(exc.msg)
+  else:
+    without indexer =?
+      manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
+    err:
+      trace "Unable to create indexing strategy from protected manifest", err = err.msg
+      return failure(err)

-  if err =? (
-    await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
-  ).errorOption:
-    trace "Unable to fetch blocks", err = err.msg
-    return failure(err)
+    without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
+      trace "Unable to get indices from strategy", err = err.msg
+      return failure(err)
+
+    if err =? (
+      await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
+    ).errorOption:
+      trace "Unable to fetch blocks", err = err.msg
+      return failure(err)

   without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
     trace "Unable to build slot", err = err.msg
     return failure(err)

-  trace "Slot successfully retrieved and reconstructed"
-
   if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
     trace "Slot root mismatch",
       manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
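Note: `allFinishedFailed` is a helper from this codebase; the diff shows it waiting for every expiry-update future and partitioning outcomes instead of failing fast. A rough equivalent of that idea over plain chronos futures, for orientation only:

    import pkg/chronos

    proc partitionFinished(
        futs: seq[Future[void]]
    ): Future[tuple[ok, failed: int]] {.async.} =
      # Settle everything first, then count outcomes rather than
      # aborting on the first error.
      await allFutures(futs)
      for f in futs:
        if f.failed():
          inc result.failed
        else:
          inc result.ok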
@@ -670,7 +756,7 @@ proc onStore(

 proc onProve(
     self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
-): Future[?!Groth16Proof] {.async.} =
+): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
   ## Generats a proof for a given slot and challenge
   ##

@@ -726,7 +812,7 @@ proc onProve(

 proc onExpiryUpdate(
     self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
-): Future[?!void] {.async.} =
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   return await self.updateExpiry(rootCid, expiry)

 proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
@@ -745,13 +831,17 @@ proc start*(self: CodexNodeRef) {.async.} =

   if hostContracts =? self.contracts.host:
     hostContracts.sales.onStore = proc(
-        request: StorageRequest, slot: uint64, onBatch: BatchProc
-    ): Future[?!void] =
-      self.onStore(request, slot, onBatch)
+        request: StorageRequest,
+        expiry: SecondsSince1970,
+        slot: uint64,
+        onBatch: BatchProc,
+        isRepairing: bool = false,
+    ): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
+      self.onStore(request, expiry, slot, onBatch, isRepairing)

     hostContracts.sales.onExpiryUpdate = proc(
         rootCid: Cid, expiry: SecondsSince1970
-    ): Future[?!void] =
+    ): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
       self.onExpiryUpdate(rootCid, expiry)

     hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
@@ -760,7 +850,7 @@ proc start*(self: CodexNodeRef) {.async.} =

     hostContracts.sales.onProve = proc(
         slot: Slot, challenge: ProofChallenge
-    ): Future[?!Groth16Proof] =
+    ): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
       # TODO: generate proof
       self.onProve(slot, challenge)

@@ -791,14 +881,11 @@ proc start*(self: CodexNodeRef) {.async.} =
     self.contracts.validator = ValidatorInteractions.none

   self.networkId = self.switch.peerInfo.peerId
-  notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs
+  notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs

 proc stop*(self: CodexNodeRef) {.async.} =
   trace "Stopping node"

-  if not self.taskpool.isNil:
-    self.taskpool.shutdown()
-
   await self.trackedFutures.cancelTracked()

   if not self.engine.isNil:
@@ -819,6 +906,7 @@ proc stop*(self: CodexNodeRef) {.async.} =
   if not self.clock.isNil:
     await self.clock.stop()

+proc close*(self: CodexNodeRef) {.async.} =
   if not self.networkStore.isNil:
     await self.networkStore.close

@@ -845,3 +933,10 @@ proc new*(
     contracts: contracts,
     trackedFutures: TrackedFutures(),
   )
+
+proc hasLocalBlock*(
+    self: CodexNodeRef, cid: Cid
+): Future[bool] {.async: (raises: [CancelledError]).} =
+  ## Returns true if the given Cid is present in the local store
+
+  return await (cid in self.networkStore.localStore)
@@ -30,12 +30,12 @@ method run*(
     requestId = purchase.requestId

   proc wait() {.async.} =
-    let done = newFuture[void]()
+    let done = newAsyncEvent()
     proc callback(_: RequestId) =
-      done.complete()
+      done.fire()

     let subscription = await market.subscribeFulfillment(request.id, callback)
-    await done
+    await done.wait()
     await subscription.unsubscribe()

   proc withTimeout(future: Future[void]) {.async.} =
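Note: replacing the bare `Future[void]` with an `AsyncEvent` makes the fulfillment callback idempotent: `fire()` may be called any number of times, while `complete()` on an already-completed future raises. A minimal chronos sketch of the difference:

    import pkg/chronos

    proc demo() {.async.} =
      let done = newAsyncEvent()
      proc callback() =
        done.fire() # safe to call repeatedly
      callback()
      callback() # a Future[void].complete() here would raise
      await done.wait()

    when isMainModule:
      waitFor demo()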
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,10 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/upraises
-
-push:
-  {.upraises: [].}
+{.push raises: [], gcsafe.}

 import std/sequtils
 import std/mimetypes
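Note: dropping the `pkg/upraises` shim in favour of a plain `{.push raises: [], gcsafe.}` makes strict exception tracking native to the module. The pragma applies to every declaration that follows, until a matching pop:

    {.push raises: [], gcsafe.}

    proc pure(x: int): int =
      # Must not raise; the compiler enforces the empty raises set.
      x + 1

    {.pop.}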
@@ -65,30 +62,43 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =

   return %RestContentList.init(content)

+proc isPending(resp: HttpResponseRef): bool =
+  ## Checks that an HttpResponseRef object is still pending; i.e.,
+  ## that no body has yet been sent. This helps us guard against calling
+  ## sendBody(resp: HttpResponseRef, ...) twice, which is illegal.
+  return resp.getResponseState() == HttpResponseState.Empty
+
 proc retrieveCid(
     node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
-): Future[RestApiResponse] {.async.} =
+): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} =
   ## Download a file from the node in a streaming
   ## manner
   ##

-  var stream: LPStream
+  var lpStream: LPStream

   var bytes = 0
   try:
     without stream =? (await node.retrieve(cid, local)), error:
       if error of BlockNotFoundError:
         resp.status = Http404
-        return await resp.sendBody("")
+        await resp.sendBody(
+          "The requested CID could not be retrieved (" & error.msg & ")."
+        )
+        return
       else:
         resp.status = Http500
-        return await resp.sendBody(error.msg)
+        await resp.sendBody(error.msg)
+        return
+
+    lpStream = stream

     # It is ok to fetch again the manifest because it will hit the cache
     without manifest =? (await node.fetchManifest(cid)), err:
       error "Failed to fetch manifest", err = err.msg
       resp.status = Http404
-      return await resp.sendBody(err.msg)
+      await resp.sendBody(err.msg)
+      return

     if manifest.mimetype.isSome:
       resp.setHeader("Content-Type", manifest.mimetype.get())
@@ -103,7 +113,14 @@ proc retrieveCid(
     else:
       resp.setHeader("Content-Disposition", "attachment")

-    await resp.prepareChunked()
+    # For erasure-coded datasets, we need to return the _original_ length; i.e.,
+    # the length of the non-erasure-coded dataset, as that's what we will be
+    # returning to the client.
+    let contentLength =
+      if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
+    resp.setHeader("Content-Length", $(contentLength.int))
+
+    await resp.prepare(HttpResponseStreamType.Plain)

     while not stream.atEof:
       var
@@ -116,17 +133,20 @@ proc retrieveCid(

       bytes += buff.len

-      await resp.sendChunk(addr buff[0], buff.len)
+      await resp.send(addr buff[0], buff.len)
     await resp.finish()
     codex_api_downloads.inc()
-  except CatchableError as exc:
+  except CancelledError as exc:
+    raise exc
+  except LPStreamError as exc:
     warn "Error streaming blocks", exc = exc.msg
     resp.status = Http500
-    return await resp.sendBody("")
+    if resp.isPending():
+      await resp.sendBody(exc.msg)
   finally:
     info "Sent bytes", cid = cid, bytes
-    if not stream.isNil:
-      await stream.close()
+    if not lpStream.isNil:
+      await lpStream.close()

 proc buildCorsHeaders(
     httpMethod: string, allowedOrigin: Option[string]
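Note: the Content-Length logic above advertises the pre-erasure-coding size for protected manifests, since that is what the client actually receives. The rule in isolation, with field names mirroring the manifest used in this diff:

    type ManifestInfo = object
      protected: bool
      datasetSize, originalDatasetSize: uint64

    func contentLength(m: ManifestInfo): uint64 =
      # Protected datasets are expanded by erasure coding; report the
      # original payload size, not the expanded one.
      if m.protected: m.originalDatasetSize else: m.datasetSize

    when isMainModule:
      doAssert contentLength(
        ManifestInfo(protected: true, datasetSize: 150, originalDatasetSize: 100)
      ) == 100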
@@ -160,7 +180,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
 proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion

-  router.api(MethodOptions, "/api/codex/v1/data") do(
+  router.api(MethodOptions, "/api/storage/v1/data") do(
     resp: HttpResponseRef
   ) -> RestApiResponse:
     if corsOrigin =? allowedOrigin:
@@ -172,7 +192,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
     resp.status = Http204
     await resp.sendBody("")

-  router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse:
+  router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
     ## Upload a file in a streaming manner
     ##

@@ -234,11 +254,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
     finally:
       await reader.closeWait()

-  router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse:
     let json = await formatManifestBlocks(node)
     return RestApiResponse.response($json, contentType = "application/json")

-  router.api(MethodOptions, "/api/codex/v1/data/{cid}") do(
+  router.api(MethodOptions, "/api/storage/v1/data/{cid}") do(
     cid: Cid, resp: HttpResponseRef
   ) -> RestApiResponse:
     if corsOrigin =? allowedOrigin:
@@ -247,7 +267,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
     resp.status = Http204
     await resp.sendBody("")

-  router.api(MethodGet, "/api/codex/v1/data/{cid}") do(
+  router.api(MethodGet, "/api/storage/v1/data/{cid}") do(
     cid: Cid, resp: HttpResponseRef
   ) -> RestApiResponse:
     var headers = buildCorsHeaders("GET", allowedOrigin)
@@ -263,11 +283,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute

     await node.retrieveCid(cid.get(), local = true, resp = resp)

-  router.api(MethodDelete, "/api/codex/v1/data/{cid}") do(
+  router.api(MethodDelete, "/api/storage/v1/data/{cid}") do(
     cid: Cid, resp: HttpResponseRef
   ) -> RestApiResponse:
     ## Deletes either a single block or an entire dataset
-    ## from the local node. Does nothing and returns 200
+    ## from the local node. Does nothing and returns 204
     ## if the dataset is not locally available.
     ##
     var headers = buildCorsHeaders("DELETE", allowedOrigin)
@@ -284,7 +304,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
     resp.status = Http204
     await resp.sendBody("")

-  router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do(
+  router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do(
     cid: Cid, resp: HttpResponseRef
   ) -> RestApiResponse:
     ## Download a file from the network to the local node
@@ -299,20 +319,13 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
       error "Failed to fetch manifest", err = err.msg
       return RestApiResponse.error(Http404, err.msg, headers = headers)

-    proc fetchDatasetAsync(): Future[void] {.async.} =
-      try:
-        if err =? (await node.fetchBatched(manifest)).errorOption:
-          error "Unable to fetch dataset", cid = cid.get(), err = err.msg
-      except CatchableError as exc:
-        error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg
-        discard
-
-    asyncSpawn fetchDatasetAsync()
+    # Start fetching the dataset in the background
+    node.fetchDatasetAsyncTask(manifest)

     let json = %formatManifest(cid.get(), manifest)
     return RestApiResponse.response($json, contentType = "application/json")

-  router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do(
+  router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
     cid: Cid, resp: HttpResponseRef
   ) -> RestApiResponse:
     ## Download a file from the network in a streaming
@@ -328,9 +341,10 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
       resp.setCorsHeaders("GET", corsOrigin)
       resp.setHeader("Access-Control-Headers", "X-Requested-With")

+    resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
     await node.retrieveCid(cid.get(), local = false, resp = resp)

-  router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
+  router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do(
     cid: Cid, resp: HttpResponseRef
   ) -> RestApiResponse:
     ## Download only the manifest.
@@ -348,7 +362,23 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
     let json = %formatManifest(cid.get(), manifest)
     return RestApiResponse.response($json, contentType = "application/json")

-  router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do(
+    cid: Cid, resp: HttpResponseRef
+  ) -> RestApiResponse:
+    ## Only test if the give CID is available in the local store
+    ##
+    var headers = buildCorsHeaders("GET", allowedOrigin)
+
+    if cid.isErr:
+      return RestApiResponse.error(Http400, $cid.error(), headers = headers)
+
+    let cid = cid.get()
+    let hasCid = await node.hasLocalBlock(cid)
+
+    let json = %*{$cid: hasCid}
+    return RestApiResponse.response($json, contentType = "application/json")
+
+  router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
     let json =
       %RestRepoStore(
         totalBlocks: repoStore.totalBlocks,
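Note: the new `/data/{cid}/exists` route answers with a single-entry JSON object keyed by the CID string. The shape of the payload, sketched with std/json and a made-up CID value:

    import std/json

    let cid = "zDvExampleCid" # hypothetical CID, for illustration only
    var payload = newJObject()
    payload[cid] = %true
    doAssert $payload == """{"zDvExampleCid":true}"""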
@@ -361,7 +391,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
 proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin

-  router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
     var headers = buildCorsHeaders("GET", allowedOrigin)

     ## Returns active slots for the host
@@ -379,7 +409,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
       trace "Excepting processing request", exc = exc.msg
       return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
+  router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
     slotId: SlotId
   ) -> RestApiResponse:
     ## Returns active slot with id {slotId} for the host. Returns 404 if the
@@ -409,7 +439,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
         restAgent.toJson, contentType = "application/json", headers = headers
       )

-  router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
     ## Returns storage that is for sale
     var headers = buildCorsHeaders("GET", allowedOrigin)

@@ -431,7 +461,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
       trace "Excepting processing request", exc = exc.msg
       return RestApiResponse.error(Http500, headers = headers)

-  router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
+  router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
     ## Add available storage to sell.
     ## Every time Availability's offer finishes, its capacity is
     ## returned to the availability.
@@ -460,7 +490,24 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =

       if restAv.totalSize == 0:
         return RestApiResponse.error(
-          Http400, "Total size must be larger then zero", headers = headers
+          Http422, "Total size must be larger then zero", headers = headers
+        )
+
+      if restAv.duration == 0:
+        return RestApiResponse.error(
+          Http422, "duration must be larger then zero", headers = headers
+        )
+
+      if restAv.minPricePerBytePerSecond == 0:
+        return RestApiResponse.error(
+          Http422,
+          "minPricePerBytePerSecond must be larger then zero",
+          headers = headers,
+        )
+
+      if restAv.totalCollateral == 0:
+        return RestApiResponse.error(
+          Http422, "totalCollateral must be larger then zero", headers = headers
         )

       if not reservations.hasAvailable(restAv.totalSize):
@@ -469,10 +516,19 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =

       without availability =? (
         await reservations.createAvailability(
-          restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond,
+          restAv.totalSize,
+          restAv.duration,
+          restAv.minPricePerBytePerSecond,
           restAv.totalCollateral,
+          enabled = restAv.enabled |? true,
+          until = restAv.until |? 0,
         )
       ), error:
+        if error of CancelledError:
+          raise error
+        if error of UntilOutOfBoundsError:
+          return RestApiResponse.error(Http422, error.msg)
+
         return RestApiResponse.error(Http500, error.msg, headers = headers)

       return RestApiResponse.response(
@@ -485,7 +541,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
       trace "Excepting processing request", exc = exc.msg
       return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do(
+  router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
     id: AvailabilityId, resp: HttpResponseRef
   ) -> RestApiResponse:
     if corsOrigin =? allowedOrigin:
@@ -494,7 +550,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
     resp.status = Http204
     await resp.sendBody("")

-  router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do(
+  router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
     id: AvailabilityId
   ) -> RestApiResponse:
     ## Updates Availability.
@@ -509,6 +565,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
     ## tokens) to be matched against the request's pricePerBytePerSecond
     ## totalCollateral - total collateral (in amount of
     ## tokens) that can be distributed among matching requests

     try:
       without contracts =? node.contracts.host:
         return RestApiResponse.error(Http503, "Persistence is not enabled")
@@ -533,17 +590,23 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
         return RestApiResponse.error(Http500, error.msg)

       if isSome restAv.freeSize:
-        return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
+        return RestApiResponse.error(Http422, "Updating freeSize is not allowed")

       if size =? restAv.totalSize:
+        if size == 0:
+          return RestApiResponse.error(Http422, "Total size must be larger then zero")
+
         # we don't allow lowering the totalSize bellow currently utilized size
         if size < (availability.totalSize - availability.freeSize):
           return RestApiResponse.error(
-            Http400,
+            Http422,
             "New totalSize must be larger then current totalSize - freeSize, which is currently: " &
               $(availability.totalSize - availability.freeSize),
           )

+        if not reservations.hasAvailable(size):
+          return RestApiResponse.error(Http422, "Not enough storage quota")
+
         availability.freeSize += size - availability.totalSize
         availability.totalSize = size

@@ -556,15 +619,26 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
       if totalCollateral =? restAv.totalCollateral:
         availability.totalCollateral = totalCollateral

-      if err =? (await reservations.update(availability)).errorOption:
-        return RestApiResponse.error(Http500, err.msg)
+      if until =? restAv.until:
+        availability.until = until

-      return RestApiResponse.response(Http200)
+      if enabled =? restAv.enabled:
+        availability.enabled = enabled
+
+      if err =? (await reservations.update(availability)).errorOption:
+        if err of CancelledError:
+          raise err
+        if err of UntilOutOfBoundsError:
+          return RestApiResponse.error(Http422, err.msg)
+        else:
+          return RestApiResponse.error(Http500, err.msg)
+
+      return RestApiResponse.response(Http204)
     except CatchableError as exc:
       trace "Excepting processing request", exc = exc.msg
       return RestApiResponse.error(Http500)

-  router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
+  router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
     id: AvailabilityId
   ) -> RestApiResponse:
     ## Gets Availability's reservations.
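Note: the status-code changes in this hunk draw a line between 400 (malformed request) and 422 (well-formed values that violate domain rules). A schematic validator in the same spirit, with illustrative names only:

    type Availability = object
      totalSize, duration: uint64

    func validate(a: Availability): tuple[status: int, msg: string] =
      # 422: the JSON parsed fine, but the values break domain rules.
      if a.totalSize == 0:
        return (422, "Total size must be larger than zero")
      if a.duration == 0:
        return (422, "duration must be larger than zero")
      (200, "")

    when isMainModule:
      doAssert validate(Availability(totalSize: 0, duration: 1)).status == 422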
@@ -608,7 +682,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
 proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin

-  router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
+  router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
     cid: Cid
   ) -> RestApiResponse:
     var headers = buildCorsHeaders("POST", allowedOrigin)
@@ -637,10 +711,36 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       without params =? StorageRequestParams.fromJson(body), error:
         return RestApiResponse.error(Http400, error.msg, headers = headers)

+      let expiry = params.expiry
+
+      if expiry <= 0 or expiry >= params.duration:
+        return RestApiResponse.error(
+          Http422,
+          "Expiry must be greater than zero and less than the request's duration",
+          headers = headers,
+        )
+
+      if params.proofProbability <= 0:
+        return RestApiResponse.error(
+          Http422, "Proof probability must be greater than zero", headers = headers
+        )
+
+      if params.collateralPerByte <= 0:
+        return RestApiResponse.error(
+          Http422, "Collateral per byte must be greater than zero", headers = headers
+        )
+
+      if params.pricePerBytePerSecond <= 0:
+        return RestApiResponse.error(
+          Http422,
+          "Price per byte per second must be greater than zero",
+          headers = headers,
+        )
+
       let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
       if params.duration > requestDurationLimit:
         return RestApiResponse.error(
-          Http400,
+          Http422,
           "Duration exceeds limit of " & $requestDurationLimit & " seconds",
           headers = headers,
         )
@@ -650,13 +750,13 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =

       if tolerance == 0:
         return RestApiResponse.error(
-          Http400, "Tolerance needs to be bigger then zero", headers = headers
+          Http422, "Tolerance needs to be bigger then zero", headers = headers
         )

       # prevent underflow
       if tolerance > nodes:
         return RestApiResponse.error(
-          Http400,
+          Http422,
           "Invalid parameters: `tolerance` cannot be greater than `nodes`",
           headers = headers,
         )
@@ -667,21 +767,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       # ensure leopard constrainst of 1 < K ≥ M
       if ecK <= 1 or ecK < ecM:
         return RestApiResponse.error(
-          Http400,
+          Http422,
           "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`",
           headers = headers,
         )

-      without expiry =? params.expiry:
-        return RestApiResponse.error(Http400, "Expiry required", headers = headers)
-
-      if expiry <= 0 or expiry >= params.duration:
-        return RestApiResponse.error(
-          Http400,
-          "Expiry needs value bigger then zero and smaller then the request's duration",
-          headers = headers,
-        )
-
       without purchaseId =?
         await node.requestStorage(
           cid, params.duration, params.proofProbability, nodes, tolerance,
@@ -689,7 +779,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
         ), error:
         if error of InsufficientBlocksError:
           return RestApiResponse.error(
-            Http400,
+            Http422,
             "Dataset too small for erasure parameters, need at least " &
               $(ref InsufficientBlocksError)(error).minSize.int & " bytes",
             headers = headers,
@@ -702,7 +792,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       trace "Excepting processing request", exc = exc.msg
       return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
+  router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
     id: PurchaseId
   ) -> RestApiResponse:
     var headers = buildCorsHeaders("GET", allowedOrigin)
@@ -734,7 +824,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       trace "Excepting processing request", exc = exc.msg
       return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
     var headers = buildCorsHeaders("GET", allowedOrigin)

     try:
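Note: `expiry` is now mandatory and validated up front rather than late in the handler. The invariant enforced above, restated as a pure function with illustrative types:

    func expiryOk(expiry, duration: int64): bool =
      # Must expire after now (greater than zero) but strictly before
      # the request's duration runs out.
      expiry > 0 and expiry < duration

    when isMainModule:
      doAssert expiryOk(10, 100)
      doAssert not expiryOk(0, 100)
      doAssert not expiryOk(100, 100)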
@@ -756,7 +846,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =

   ## various node management api's
   ##
-  router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse:
     ## Returns node SPR in requested format, json or text.
     ##
     var headers = buildCorsHeaders("GET", allowedOrigin)
@@ -779,7 +869,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse:
     ## Returns node's peerId in requested format, json or text.
     ##
     var headers = buildCorsHeaders("GET", allowedOrigin)
@@ -798,7 +888,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do(
+  router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do(
     peerId: PeerId, addrs: seq[MultiAddress]
   ) -> RestApiResponse:
     ## Connect to a peer
@@ -836,7 +926,7 @@
 proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin

-  router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse:
+  router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse:
     ## Print rudimentary node information
     ##
     var headers = buildCorsHeaders("GET", allowedOrigin)
@@ -856,7 +946,11 @@
           "",
         "announceAddresses": node.discovery.announceAddrs,
         "table": table,
-        "codex": {"version": $codexVersion, "revision": $codexRevision},
+        "storage": {
+          "version": $codexVersion,
+          "revision": $codexRevision,
+          "contracts": $codexContractsRevision,
+        },
       }

       # return pretty json for human readability
@@ -867,7 +961,7 @@
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

-  router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do(
+  router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do(
     level: Option[string]
   ) -> RestApiResponse:
     ## Set log level at run time
@@ -893,8 +987,8 @@
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

-  when codex_enable_api_debug_peers:
-    router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do(
+  when storage_enable_api_debug_peers:
+    router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do(
       peerId: PeerId
     ) -> RestApiResponse:
       var headers = buildCorsHeaders("GET", allowedOrigin)
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -17,7 +17,7 @@
     proofProbability* {.serialize.}: UInt256
     pricePerBytePerSecond* {.serialize.}: UInt256
     collateralPerByte* {.serialize.}: UInt256
-    expiry* {.serialize.}: ?uint64
+    expiry* {.serialize.}: uint64
     nodes* {.serialize.}: ?uint
     tolerance* {.serialize.}: ?uint

@@ -33,6 +33,8 @@
     minPricePerBytePerSecond* {.serialize.}: UInt256
     totalCollateral* {.serialize.}: UInt256
     freeSize* {.serialize.}: ?uint64
+    enabled* {.serialize.}: ?bool
+    until* {.serialize.}: ?SecondsSince1970

   RestSalesAgent* = object
     state* {.serialize.}: string
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -7,10 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/upraises
-
-push:
-  {.upraises: [].}
+{.push raises: [], gcsafe.}

 import pkg/libp2p/crypto/crypto
 import pkg/bearssl/rand
185 codex/sales.nim
@@ -22,7 +22,7 @@ import ./utils/exceptions
 ## Sales holds a list of available storage that it may sell.
 ##
 ## When storage is requested on the market that matches availability, the Sales
-## object will instruct the Codex node to persist the requested data. Once the
+## object will instruct the Logos Storage node to persist the requested data. Once the
 ## data has been persisted, it uploads a proof of storage to the market in an
 ## attempt to win a storage contract.
 ##
@@ -105,19 +105,15 @@ proc new*(
    subscriptions: @[],
  )

-proc remove(sales: Sales, agent: SalesAgent) {.async.} =
+proc remove(sales: Sales, agent: SalesAgent) {.async: (raises: []).} =
  await agent.stop()

  if sales.running:
    sales.agents.keepItIf(it != agent)

 proc cleanUp(
-  sales: Sales,
-  agent: SalesAgent,
-  returnBytes: bool,
-  reprocessSlot: bool,
-  returnedCollateral: ?UInt256,
-  processing: Future[void],
-) {.async.} =
+  sales: Sales, agent: SalesAgent, reprocessSlot: bool, returnedCollateral: ?UInt256
+) {.async: (raises: []).} =
  let data = agent.data

  logScope:
@@ -132,72 +128,70 @@ proc cleanUp(
  # if reservation for the SalesAgent was not created, then it means
  # that the cleanUp was called before the sales process really started, so
  # there are not really any bytes to be returned
-  if returnBytes and request =? data.request and reservation =? data.reservation:
+  if request =? data.request and reservation =? data.reservation:
    if returnErr =? (
-      await sales.context.reservations.returnBytesToAvailability(
+      await noCancel sales.context.reservations.returnBytesToAvailability(
        reservation.availabilityId, reservation.id, request.ask.slotSize
      )
    ).errorOption:
      error "failure returning bytes",
        error = returnErr.msg, bytes = request.ask.slotSize

  # delete reservation and return reservation bytes back to the availability
  if reservation =? data.reservation and
      deleteErr =? (
-        await sales.context.reservations.deleteReservation(
+        await noCancel sales.context.reservations.deleteReservation(
          reservation.id, reservation.availabilityId, returnedCollateral
        )
      ).errorOption:
    error "failure deleting reservation", error = deleteErr.msg

-  if data.slotIndex > uint16.high.uint64:
-    error "Cannot cast slot index to uint16", slotIndex = data.slotIndex
-    return
-
  # Re-add items back into the queue to prevent small availabilities from
  # draining the queue. Seen items will be ordered last.
-  if reprocessSlot and request =? data.request:
+  if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
    let queue = sales.context.slotQueue
-    var seenItem = SlotQueueItem.init(
-      data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true
-    )
+    item.seen = true
    trace "pushing ignored item to queue, marked as seen"
-    if err =? queue.push(seenItem).errorOption:
+    if err =? queue.push(item).errorOption:
      error "failed to readd slot to queue", errorType = $(type err), error = err.msg

-  await sales.remove(agent)
+  let fut = sales.remove(agent)
+  sales.trackedFutures.track(fut)

-  # signal back to the slot queue to cycle a worker
-  if not processing.isNil and not processing.finished():
-    processing.complete()
-
-proc filled(
-  sales: Sales, request: StorageRequest, slotIndex: uint64, processing: Future[void]
-) =
+proc filled(sales: Sales, request: StorageRequest, slotIndex: uint64) =
  if onSale =? sales.context.onSale:
    onSale(request, slotIndex)

-  # signal back to the slot queue to cycle a worker
-  if not processing.isNil and not processing.finished():
-    processing.complete()
-
-proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
+proc processSlot(
+  sales: Sales, item: SlotQueueItem
+) {.async: (raises: [CancelledError]).} =
  debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex

-  let agent =
-    newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest)
+  let agent = newSalesAgent(
+    sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
+  )
+
+  let completed = newAsyncEvent()

  agent.onCleanUp = proc(
-    returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
-  ) {.async.} =
-    await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
+    reprocessSlot = false, returnedCollateral = UInt256.none
+  ) {.async: (raises: []).} =
+    trace "slot cleanup"
+    await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
+    completed.fire()

  agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
-    sales.filled(request, slotIndex, done)
+    trace "slot filled"
+    sales.filled(request, slotIndex)
+    completed.fire()

  agent.start(SalePreparing())
  sales.agents.add agent

+  trace "waiting for slot processing to complete"
+  await completed.wait()
+  trace "slot processing completed"
+
 proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
  let reservations = sales.context.reservations
  without reservs =? await reservations.all(Reservation):
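
The rewritten processSlot above parks the queue worker on an event that the agent's callbacks fire, instead of threading a `done` future through cleanUp. A minimal, runnable sketch of that completion pattern, using std/asyncdispatch in place of the project's chronos AsyncEvent (all names here are hypothetical):

import std/asyncdispatch

proc processOne(label: string) {.async.} =
  # stands in for chronos' newAsyncEvent(); complete() plays the role of fire()
  let completed = newFuture[void]("processOne")

  proc onCleanUp() {.async.} =
    await sleepAsync(10)          # pretend the sale state machine ran
    echo label, ": cleanup ran"
    if not completed.finished:
      completed.complete()

  asyncCheck onCleanUp()          # callback is triggered elsewhere, later
  await completed                 # the worker blocks here, like completed.wait()
  echo label, ": slot processing completed"

waitFor processOne("slot-0")

The design point is that the agent's callbacks, not the queue machinery, decide when the slot counts as processed, so a worker can never be recycled while cleanup is still in flight.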
@@ -256,12 +250,9 @@ proc load*(sales: Sales) {.async.} =
      newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)

    agent.onCleanUp = proc(
-      returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
-    ) {.async.} =
-      # since workers are not being dispatched, this future has not been created
-      # by a worker. Create a dummy one here so we can call sales.cleanUp
-      let done: Future[void] = nil
-      await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
+      reprocessSlot = false, returnedCollateral = UInt256.none
+    ) {.async: (raises: []).} =
+      await sales.cleanUp(agent, reprocessSlot, returnedCollateral)

    # There is no need to assign agent.onFilled as slots loaded from `mySlots`
    # are inherently already filled and so assigning agent.onFilled would be
@@ -270,7 +261,9 @@ proc load*(sales: Sales) {.async.} =
    agent.start(SaleUnknown())
    sales.agents.add agent

-proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
+proc OnAvailabilitySaved(
+  sales: Sales, availability: Availability
+) {.async: (raises: []).} =
  ## When availabilities are modified or added, the queue should be unpaused if
  ## it was paused and any slots in the queue should have their `seen` flag
  ## cleared.
@@ -283,7 +276,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =

 proc onStorageRequested(
  sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
-) =
+) {.raises: [].} =
  logScope:
    topics = "marketplace sales onStorageRequested"
    requestId
@@ -294,7 +287,14 @@ proc onStorageRequested(

  trace "storage requested, adding slots to queue"

-  without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
+  let market = sales.context.market
+
+  without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
+    err:
+    error "Request failure, unable to calculate collateral", error = err.msg
+    return
+
+  without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
    if err of SlotsOutOfRangeError:
      warn "Too many slots, cannot add to queue"
    else:
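
The hunk above computes the collateral once per request, before the request is expanded into per-slot queue items, and drops the request early when that computation fails. A self-contained sketch of the same fail-fast shape (std/options only; slotCollateral and the other names are stand-ins, not the project's API):

import std/options

type SlotState = enum Free, Repair

proc slotCollateral(collateralPerSlot: uint64, state: SlotState): Option[uint64] =
  # stand-in for the market query; the real value may depend on the slot state
  some(collateralPerSlot)

proc addSlotsToQueue(requestId: string, slots: int, collateralPerSlot: uint64): bool =
  let collateral = slotCollateral(collateralPerSlot, Free)
  if collateral.isNone:
    echo "unable to calculate collateral, dropping request ", requestId
    return false                  # fail fast: no queue items are created
  for i in 0 ..< slots:
    echo "queued slot ", i, " of ", requestId, " with collateral ", collateral.get
  true

discard addSlotsToQueue("req-1", slots = 3, collateralPerSlot = 100)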
@@ -324,39 +324,54 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
    let market = context.market
    let queue = context.slotQueue

-    if slotIndex > uint16.high.uint64:
-      error "Cannot cast slot index to uint16, value = ", slotIndex
-      return
+    try:
+      without request =? (await market.getRequest(requestId)), err:
+        error "unknown request in contract", error = err.msgDetail
+        return

-    # first attempt to populate request using existing metadata in queue
-    without var found =? queue.populateItem(requestId, slotIndex.uint16):
-      trace "no existing request metadata, getting request info from contract"
-      # if there's no existing slot for that request, retrieve the request
-      # from the contract.
-      try:
-        without request =? await market.getRequest(requestId):
-          error "unknown request in contract"
-          return
+      # Take the repairing state into consideration to calculate the collateral.
+      # This is particularly needed because it will affect the priority in the queue
+      # and we want to give the user the ability to tweak the parameters.
+      # Adding the repairing state directly in the queue priority calculation
+      # would not allow this flexibility.
+      without collateral =?
+        market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
+        error "Failed to add freed slot to queue: unable to calculate collateral",
+          error = err.msg
+        return

-        found = SlotQueueItem.init(request, slotIndex.uint16)
-      except CancelledError:
-        discard # do not propagate as addSlotToQueue was asyncSpawned
-      except CatchableError as e:
-        error "failed to get request from contract and add slots to queue",
-          error = e.msgDetail
+      if slotIndex > uint16.high.uint64:
+        error "Cannot cast slot index to uint16, value = ", slotIndex
+        return

-    if err =? queue.push(found).errorOption:
-      error "failed to push slot items to queue", error = err.msgDetail
+      without slotQueueItem =?
+        SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
+        err:
+        warn "Too many slots, cannot add to queue", error = err.msgDetail
+        return
+
+      if err =? queue.push(slotQueueItem).errorOption:
+        if err of SlotQueueItemExistsError:
+          error "Failed to push item to queue because it already exists",
+            error = err.msgDetail
+        elif err of QueueNotRunningError:
+          warn "Failed to push item to queue because queue is not running",
+            error = err.msgDetail
+    except CancelledError as e:
+      trace "sales.addSlotToQueue was cancelled"
+
+  # We could get rid of this by adding the storage ask in the SlotFreed event,
+  # so we would not need to call getRequest to get the collateralPerSlot.
  let fut = addSlotToQueue()
  sales.trackedFutures.track(fut)
-  asyncSpawn fut

 proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

-  proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) =
+  proc onStorageRequested(
+    requestId: RequestId, ask: StorageAsk, expiry: uint64
+  ) {.raises: [].} =
    sales.onStorageRequested(requestId, ask, expiry)

  try:
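
onSlotFreed above distinguishes the two push failures: a duplicate queue item is logged as an error, while a stopped queue is only a warning, since that is expected during shutdown. A compilable sketch of that dispatch on error subtypes (plain Nim; shortened names mirror, but do not import, the project's SlotQueueItemExistsError and QueueNotRunningError):

type
  QueueError = object of CatchableError
  ItemExistsError = object of QueueError
  NotRunningError = object of QueueError

proc push(running: bool, duplicate: bool): ref QueueError =
  ## returns nil on success, an error object otherwise (stand-in for a Result)
  if duplicate:
    return (ref ItemExistsError)(msg: "item already exists")
  if not running:
    return (ref NotRunningError)(msg: "queue is not running")
  nil

let err = push(running = false, duplicate = false)
if err != nil:
  if err of ItemExistsError:
    echo "error: ", err.msg      # unexpected: treated as an error
  elif err of NotRunningError:
    echo "warn: ", err.msg       # expected during shutdown: only a warning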
@@ -488,16 +503,20 @@ proc startSlotQueue(sales: Sales) =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

-  slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
+  slotQueue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
    trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
-    sales.processSlot(item, done)
+    try:
+      await sales.processSlot(item)
+    except CancelledError:
+      discard

  slotQueue.start()

-  proc onAvailabilityAdded(availability: Availability) {.async.} =
-    await sales.onAvailabilityAdded(availability)
+  proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
+    if availability.enabled:
+      await sales.OnAvailabilitySaved(availability)

-  reservations.onAvailabilityAdded = onAvailabilityAdded
+  reservations.OnAvailabilitySaved = OnAvailabilitySaved

 proc subscribe(sales: Sales) {.async.} =
  await sales.subscribeRequested()
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -27,17 +27,15 @@
 ## | UInt256 | totalRemainingCollateral | |
 ## +---------------------------------------------------+

-import pkg/upraises
-
-push:
-  {.upraises: [].}
+{.push raises: [], gcsafe.}

 import std/sequtils
 import std/sugar
 import std/typetraits
 import std/sequtils
+import std/times
 import pkg/chronos
 import pkg/datastore
-import pkg/nimcrypto
 import pkg/questionable
 import pkg/questionable/results
 import pkg/stint
@@ -54,8 +52,10 @@ import ../units
 export requests
 export logutils

+from nimcrypto import randomBytes
+
 logScope:
-  topics = "sales reservations"
+  topics = "marketplace sales reservations"

 type
  AvailabilityId* = distinct array[32, byte]
@@ -70,6 +70,12 @@ type
    minPricePerBytePerSecond* {.serialize.}: UInt256
    totalCollateral {.serialize.}: UInt256
    totalRemainingCollateral* {.serialize.}: UInt256
+    # If set to false, the availability will not accept new slots.
+    # If enabled, it will not impact any existing slots that are already being hosted.
+    enabled* {.serialize.}: bool
+    # Specifies the latest timestamp after which the availability will no longer host any slots.
+    # If set to 0, there will be no restrictions.
+    until* {.serialize.}: SecondsSince1970

  Reservation* = ref object
    id* {.serialize.}: ReservationId
@@ -77,17 +83,18 @@ type
    size* {.serialize.}: uint64
    requestId* {.serialize.}: RequestId
    slotIndex* {.serialize.}: uint64
+    validUntil* {.serialize.}: SecondsSince1970

  Reservations* = ref object of RootObj
    availabilityLock: AsyncLock
      # Lock for protecting assertions of availability's sizes when searching for matching availability
    repo: RepoStore
-    onAvailabilityAdded: ?OnAvailabilityAdded
+    OnAvailabilitySaved: ?OnAvailabilitySaved

-  GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
-  IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
-  OnAvailabilityAdded* =
-    proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
+  GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
+  IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
+  OnAvailabilitySaved* =
+    proc(availability: Availability): Future[void] {.async: (raises: []).}
  StorableIter* = ref object
    finished*: bool
    next*: GetNext
@@ -102,13 +109,20 @@ type
  SerializationError* = object of ReservationsError
  UpdateFailedError* = object of ReservationsError
  BytesOutOfBoundsError* = object of ReservationsError
+  UntilOutOfBoundsError* = object of ReservationsError

 const
  SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
  ReservationsKey = (SalesKey / "reservations").tryGet

 proc hash*(x: AvailabilityId): Hash {.borrow.}
-proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.}
+proc all*(
+  self: Reservations, T: type SomeStorableObject
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
+
+proc all*(
+  self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
+): Future[?!seq[T]] {.async: (raises: [CancelledError]).}

 template withLock(lock, body) =
  try:
@@ -128,6 +142,8 @@ proc init*(
  duration: uint64,
  minPricePerBytePerSecond: UInt256,
  totalCollateral: UInt256,
+  enabled: bool,
+  until: SecondsSince1970,
 ): Availability =
  var id: array[32, byte]
  doAssert randomBytes(id) == 32
@@ -139,6 +155,8 @@ proc init*(
    minPricePerBytePerSecond: minPricePerBytePerSecond,
    totalCollateral: totalCollateral,
    totalRemainingCollateral: totalCollateral,
+    enabled: enabled,
+    until: until,
  )

 func totalCollateral*(self: Availability): UInt256 {.inline.} =
@@ -154,6 +172,7 @@ proc init*(
  size: uint64,
  requestId: RequestId,
  slotIndex: uint64,
+  validUntil: SecondsSince1970,
 ): Reservation =
  var id: array[32, byte]
  doAssert randomBytes(id) == 32
@@ -163,6 +182,7 @@ proc init*(
    size: size,
    requestId: requestId,
    slotIndex: slotIndex,
+    validUntil: validUntil,
  )

 func toArray(id: SomeStorableId): array[32, byte] =
@@ -189,10 +209,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId):
 logutils.formatIt(LogFormat.json, SomeStorableId):
  it.to0xHexLog

-proc `onAvailabilityAdded=`*(
-  self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
+proc `OnAvailabilitySaved=`*(
+  self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
 ) =
-  self.onAvailabilityAdded = some onAvailabilityAdded
+  self.OnAvailabilitySaved = some OnAvailabilitySaved

 func key*(id: AvailabilityId): ?!Key =
  ## sales / reservations / <availabilityId>
@@ -206,6 +226,11 @@ func key*(availability: Availability): ?!Key =
  return availability.id.key

 func maxCollateralPerByte*(availability: Availability): UInt256 =
+  # If freeSize happens to be zero, we convention that the maxCollateralPerByte
+  # should be equal to totalRemainingCollateral.
+  if availability.freeSize == 0.uint64:
+    return availability.totalRemainingCollateral
+
  return availability.totalRemainingCollateral div availability.freeSize.stuint(256)

 func key*(reservation: Reservation): ?!Key =
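
The guard added above avoids a division by zero once an availability is fully drained. The same logic with plain integers, as a runnable sketch:

proc maxCollateralPerByte(totalRemainingCollateral, freeSize: uint64): uint64 =
  # without the early return, freeSize == 0 would divide by zero
  if freeSize == 0:
    return totalRemainingCollateral
  totalRemainingCollateral div freeSize

doAssert maxCollateralPerByte(1000, 0) == 1000   # guarded case
doAssert maxCollateralPerByte(1000, 250) == 4    # 4 per byte otherwise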
@@ -217,11 +242,19 @@ func available*(self: Reservations): uint =
 func hasAvailable*(self: Reservations, bytes: uint): bool =
  self.repo.available(bytes.NBytes)

-proc exists*(self: Reservations, key: Key): Future[bool] {.async.} =
+proc exists*(
+  self: Reservations, key: Key
+): Future[bool] {.async: (raises: [CancelledError]).} =
  let exists = await self.repo.metaDs.ds.contains(key)
  return exists

-proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
+iterator items(self: StorableIter): auto =
+  while not self.finished:
+    yield self.next()
+
+proc getImpl(
+  self: Reservations, key: Key
+): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
  if not await self.exists(key):
    let err =
      newException(NotExistsError, "object with key " & $key & " does not exist")
@@ -234,7 +267,7 @@ proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =

 proc get*(
  self: Reservations, key: Key, T: type SomeStorableObject
-): Future[?!T] {.async.} =
+): Future[?!T] {.async: (raises: [CancelledError]).} =
  without serialized =? await self.getImpl(key), error:
    return failure(error)

@@ -243,7 +276,9 @@ proc get*(

  return success obj

-proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} =
+proc updateImpl(
+  self: Reservations, obj: SomeStorableObject
+): Future[?!void] {.async: (raises: [CancelledError]).} =
  trace "updating " & $(obj.type), id = obj.id

  without key =? obj.key, error:
@@ -256,10 +291,15 @@ proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.a

 proc updateAvailability(
  self: Reservations, obj: Availability
-): Future[?!void] {.async.} =
+): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    availabilityId = obj.id

+  if obj.until < 0:
+    let error =
+      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
+    return failure(error)
+
  without key =? obj.key, error:
    return failure(error)

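
updateAvailability now rejects a negative until up front, returning a failure value rather than raising. A tiny runnable sketch of that validation as a nil-or-error helper (names are hypothetical, mirroring the UntilOutOfBoundsError introduced above):

type UntilOutOfBoundsError = object of CatchableError

proc validateUntil(until: int64): ref UntilOutOfBoundsError =
  ## nil means valid; mirrors the failure(...) return in updateAvailability
  if until < 0:
    return (ref UntilOutOfBoundsError)(msg: "Cannot set until to a negative value")
  nil

doAssert validateUntil(0) == nil      # 0 disables the restriction
doAssert validateUntil(-1) != nil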
@@ -268,22 +308,26 @@ proc updateAvailability(
    trace "Creating new Availability"
    let res = await self.updateImpl(obj)
    # inform subscribers that Availability has been added
-    if onAvailabilityAdded =? self.onAvailabilityAdded:
-      # when chronos v4 is implemented, and OnAvailabilityAdded is annotated
-      # with async:(raises:[]), we can remove this try/catch as we know, with
-      # certainty, that nothing will be raised
-      try:
-        await onAvailabilityAdded(obj)
-      except CancelledError as e:
-        raise e
-      except CatchableError as e:
-        # we don't have any insight into types of exceptions that
-        # `onAvailabilityAdded` can raise because it is caller-defined
-        warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
+    if OnAvailabilitySaved =? self.OnAvailabilitySaved:
+      await OnAvailabilitySaved(obj)
    return res
  else:
    return failure(err)

+  if obj.until > 0:
+    without allReservations =? await self.all(Reservation, obj.id), error:
+      error.msg = "Error updating reservation: " & error.msg
+      return failure(error)
+
+    let requestEnds = allReservations.mapIt(it.validUntil)
+
+    if requestEnds.len > 0 and requestEnds.max > obj.until:
+      let error = newException(
+        UntilOutOfBoundsError,
+        "Until parameter must be greater or equal to the longest currently hosted slot",
+      )
+      return failure(error)
+
  # Sizing of the availability changed, we need to adjust the repo reservation accordingly
  if oldAvailability.totalSize != obj.totalSize:
    trace "totalSize changed, updating repo reservation"
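
The second until rule above says an availability may only set a deadline that still covers its longest-running reservation; until == 0 keeps the availability unrestricted. With plain int64 unix timestamps, the rule looks like this (a sketch, not the project's code):

proc untilIsValid(until: int64, reservationEnds: seq[int64]): bool =
  if until == 0:            # 0 disables the restriction entirely
    return true
  for ends in reservationEnds:
    if ends > until:
      return false          # would orphan a slot already committed to hosting
  true

doAssert untilIsValid(0, @[1_700_000_000'i64])
doAssert untilIsValid(1_800_000_000, @[1_700_000_000'i64])
doAssert not untilIsValid(1_600_000_000, @[1_700_000_000'i64])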
@@ -300,32 +344,34 @@ proc updateAvailability(

  let res = await self.updateImpl(obj)

-  if oldAvailability.freeSize < obj.freeSize: # availability added
+  if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
+      oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
+      oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
+    # availability updated
    # inform subscribers that Availability has been modified (with increased
    # size)
-    if onAvailabilityAdded =? self.onAvailabilityAdded:
-      # when chronos v4 is implemented, and OnAvailabilityAdded is annotated
-      # with async:(raises:[]), we can remove this try/catch as we know, with
-      # certainty, that nothing will be raised
-      try:
-        await onAvailabilityAdded(obj)
-      except CancelledError as e:
-        raise e
-      except CatchableError as e:
-        # we don't have any insight into types of exceptions that
-        # `onAvailabilityAdded` can raise because it is caller-defined
-        warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
+    if OnAvailabilitySaved =? self.OnAvailabilitySaved:
+      await OnAvailabilitySaved(obj)

  return res

-proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} =
+proc update*(
+  self: Reservations, obj: Reservation
+): Future[?!void] {.async: (raises: [CancelledError]).} =
  return await self.updateImpl(obj)

-proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} =
-  withLock(self.availabilityLock):
-    return await self.updateAvailability(obj)
+proc update*(
+  self: Reservations, obj: Availability
+): Future[?!void] {.async: (raises: [CancelledError]).} =
+  try:
+    withLock(self.availabilityLock):
+      return await self.updateAvailability(obj)
+  except AsyncLockError as e:
+    error "Lock error when trying to update the availability", err = e.msg
+    return failure(e)

-proc delete(self: Reservations, key: Key): Future[?!void] {.async.} =
+proc delete(
+  self: Reservations, key: Key
+): Future[?!void] {.async: (raises: [CancelledError]).} =
  trace "deleting object", key

  if not await self.exists(key):
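
With these procs annotated raises: [] / raises: [CancelledError], a failed lock acquisition can no longer escape as an exception, so update* converts AsyncLockError into a returned failure. A self-contained sketch of that convert-to-result shape, with a plain boolean standing in for chronos' AsyncLock (all names hypothetical):

type LockError = object of CatchableError

var locked = false

template withLock(body: untyped) =
  if locked:
    raise newException(LockError, "lock already held")
  locked = true
  try:
    body
  finally:
    locked = false

proc update(): tuple[ok: bool, msg: string] =
  try:
    withLock:
      return (true, "")
  except LockError as e:
    # mirror the diff: log-and-return-failure instead of propagating
    return (false, "Lock error when trying to update the availability: " & e.msg)

doAssert update().ok
locked = true
doAssert not update().ok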
@@ -341,25 +387,23 @@ proc deleteReservation*(
  reservationId: ReservationId,
  availabilityId: AvailabilityId,
  returnedCollateral: ?UInt256 = UInt256.none,
-): Future[?!void] {.async.} =
+): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    reservationId
    availabilityId

  trace "deleting reservation"

  without key =? key(reservationId, availabilityId), error:
    return failure(error)

-  withLock(self.availabilityLock):
-    without reservation =? (await self.get(key, Reservation)), error:
-      if error of NotExistsError:
-        return success()
-      else:
-        return failure(error)
-
-    if reservation.size > 0.uint64:
-      trace "returning remaining reservation bytes to availability",
-        size = reservation.size
+  try:
+    withLock(self.availabilityLock):
+      without reservation =? (await self.get(key, Reservation)), error:
+        if error of NotExistsError:
+          return success()
+        else:
+          return failure(error)

      without availabilityKey =? availabilityId.key, error:
        return failure(error)
@@ -367,7 +411,10 @@ proc deleteReservation*(
      without var availability =? await self.get(availabilityKey, Availability), error:
        return failure(error)

-      availability.freeSize += reservation.size
+      if reservation.size > 0.uint64:
+        trace "returning remaining reservation bytes to availability",
+          size = reservation.size
+        availability.freeSize += reservation.size

      if collateral =? returnedCollateral:
        availability.totalRemainingCollateral += collateral
@@ -375,10 +422,13 @@ proc deleteReservation*(
      if updateErr =? (await self.updateAvailability(availability)).errorOption:
        return failure(updateErr)

      if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
        return failure(err.toErr(DeleteFailedError))

      return success()
+  except AsyncLockError as e:
+    error "Lock error when trying to delete the availability", err = e.msg
+    return failure(e)

 # TODO: add support for deleting availabilities
 # To delete, must not have any active sales.
@@ -389,12 +439,20 @@ proc createAvailability*(
  duration: uint64,
  minPricePerBytePerSecond: UInt256,
  totalCollateral: UInt256,
-): Future[?!Availability] {.async.} =
+  enabled: bool,
+  until: SecondsSince1970,
+): Future[?!Availability] {.async: (raises: [CancelledError]).} =
  trace "creating availability",
-    size, duration, minPricePerBytePerSecond, totalCollateral
+    size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until

-  let availability =
-    Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral)
+  if until < 0:
+    let error =
+      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
+    return failure(error)
+
+  let availability = Availability.init(
+    size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
+  )
  let bytes = availability.freeSize

  if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
@ -418,114 +476,124 @@ method createReservation*(
|
|||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
slotIndex: uint64,
|
slotIndex: uint64,
|
||||||
collateralPerByte: UInt256,
|
collateralPerByte: UInt256,
|
||||||
): Future[?!Reservation] {.async, base.} =
|
validUntil: SecondsSince1970,
|
||||||
withLock(self.availabilityLock):
|
): Future[?!Reservation] {.async: (raises: [CancelledError]), base.} =
|
||||||
without availabilityKey =? availabilityId.key, error:
|
try:
|
||||||
return failure(error)
|
withLock(self.availabilityLock):
|
||||||
|
without availabilityKey =? availabilityId.key, error:
|
||||||
|
return failure(error)
|
||||||
|
|
||||||
without availability =? await self.get(availabilityKey, Availability), error:
|
without availability =? await self.get(availabilityKey, Availability), error:
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
# Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availiability modifications
|
# Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availiability modifications
|
||||||
if availability.freeSize < slotSize:
|
if availability.freeSize < slotSize:
|
||||||
let error = newException(
|
let error = newException(
|
||||||
BytesOutOfBoundsError,
|
BytesOutOfBoundsError,
|
||||||
"trying to reserve an amount of bytes that is greater than the free size of the Availability",
|
"trying to reserve an amount of bytes that is greater than the free size of the Availability",
|
||||||
)
|
)
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
|
trace "Creating reservation",
|
||||||
|
availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil
|
||||||
|
|
||||||
let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex)
|
let reservation =
|
||||||
|
Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)
|
||||||
|
|
||||||
if createResErr =? (await self.update(reservation)).errorOption:
|
if createResErr =? (await self.update(reservation)).errorOption:
|
||||||
return failure(createResErr)
|
return failure(createResErr)
|
||||||
|
|
||||||
# reduce availability freeSize by the slot size, which is now accounted for in
|
# reduce availability freeSize by the slot size, which is now accounted for in
|
||||||
# the newly created Reservation
|
# the newly created Reservation
|
||||||
availability.freeSize -= slotSize
|
availability.freeSize -= slotSize
|
||||||
|
|
||||||
# adjust the remaining totalRemainingCollateral
|
# adjust the remaining totalRemainingCollateral
|
||||||
availability.totalRemainingCollateral -= slotSize.stuint(256) * collateralPerByte
|
availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte
|
||||||
|
|
||||||
# update availability with reduced size
|
# update availability with reduced size
|
||||||
trace "Updating availability with reduced size"
|
trace "Updating availability with reduced size", freeSize = availability.freeSize
|
||||||
if updateErr =? (await self.updateAvailability(availability)).errorOption:
|
if updateErr =? (await self.updateAvailability(availability)).errorOption:
|
||||||
trace "Updating availability failed, rolling back reservation creation"
|
trace "Updating availability failed, rolling back reservation creation"
|
||||||
|
|
||||||
without key =? reservation.key, keyError:
|
without key =? reservation.key, keyError:
|
||||||
keyError.parent = updateErr
|
keyError.parent = updateErr
|
||||||
return failure(keyError)
|
return failure(keyError)
|
||||||
|
|
||||||
# rollback the reservation creation
|
# rollback the reservation creation
|
||||||
if rollbackErr =? (await self.delete(key)).errorOption:
|
if rollbackErr =? (await self.delete(key)).errorOption:
|
||||||
rollbackErr.parent = updateErr
|
rollbackErr.parent = updateErr
|
||||||
return failure(rollbackErr)
|
return failure(rollbackErr)
|
||||||
|
|
||||||
return failure(updateErr)
|
return failure(updateErr)
|
||||||
|
|
||||||
trace "Reservation succesfully created"
|
trace "Reservation succesfully created"
|
||||||
return success(reservation)
|
return success(reservation)
|
||||||
|
except AsyncLockError as e:
|
||||||
|
error "Lock error when trying to delete the availability", err = e.msg
|
||||||
|
return failure(e)
|
||||||
|
|
||||||
proc returnBytesToAvailability*(
|
proc returnBytesToAvailability*(
|
||||||
self: Reservations,
|
self: Reservations,
|
||||||
availabilityId: AvailabilityId,
|
availabilityId: AvailabilityId,
|
||||||
reservationId: ReservationId,
|
reservationId: ReservationId,
|
||||||
bytes: uint64,
|
bytes: uint64,
|
||||||
): Future[?!void] {.async.} =
|
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||||
logScope:
|
logScope:
|
||||||
reservationId
|
reservationId
|
||||||
availabilityId
|
availabilityId
|
||||||
|
try:
|
||||||
|
withLock(self.availabilityLock):
|
||||||
|
without key =? key(reservationId, availabilityId), error:
|
||||||
|
return failure(error)
|
||||||
|
|
||||||
withLock(self.availabilityLock):
|
without var reservation =? (await self.get(key, Reservation)), error:
|
||||||
without key =? key(reservationId, availabilityId), error:
|
return failure(error)
|
||||||
return failure(error)
|
|
||||||
|
|
||||||
without var reservation =? (await self.get(key, Reservation)), error:
|
# We are ignoring bytes that are still present in the Reservation because
|
||||||
return failure(error)
|
# they will be returned to Availability through `deleteReservation`.
|
||||||
|
let bytesToBeReturned = bytes - reservation.size
|
||||||
|
|
||||||
# We are ignoring bytes that are still present in the Reservation because
|
if bytesToBeReturned == 0:
|
||||||
# they will be returned to Availability through `deleteReservation`.
|
trace "No bytes are returned",
|
||||||
let bytesToBeReturned = bytes - reservation.size
|
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
||||||
|
return success()
|
||||||
|
|
||||||
if bytesToBeReturned == 0:
|
trace "Returning bytes",
|
||||||
trace "No bytes are returned",
|
|
||||||
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
||||||
|
|
||||||
|
# First lets see if we can re-reserve the bytes, if the Repo's quota
|
||||||
|
# is depleted then we will fail-fast as there is nothing to be done atm.
|
||||||
|
if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
|
||||||
|
return failure(reserveErr.toErr(ReserveFailedError))
|
||||||
|
|
||||||
|
without availabilityKey =? availabilityId.key, error:
|
||||||
|
return failure(error)
|
||||||
|
|
||||||
|
without var availability =? await self.get(availabilityKey, Availability), error:
|
||||||
|
return failure(error)
|
||||||
|
|
||||||
|
availability.freeSize += bytesToBeReturned
|
||||||
|
|
||||||
|
# Update availability with returned size
|
||||||
|
if updateErr =? (await self.updateAvailability(availability)).errorOption:
|
||||||
|
trace "Rolling back returning bytes"
|
||||||
|
if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
|
||||||
|
rollbackErr.parent = updateErr
|
||||||
|
return failure(rollbackErr)
|
||||||
|
|
||||||
|
return failure(updateErr)
|
||||||
|
|
||||||
return success()
|
return success()
|
||||||
|
except AsyncLockError as e:
|
||||||
trace "Returning bytes",
|
error "Lock error when returning bytes to the availability", err = e.msg
|
||||||
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
return failure(e)
|
||||||
|
|
||||||
# First lets see if we can re-reserve the bytes, if the Repo's quota
|
|
||||||
# is depleted then we will fail-fast as there is nothing to be done atm.
|
|
||||||
if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
|
|
||||||
return failure(reserveErr.toErr(ReserveFailedError))
|
|
||||||
|
|
||||||
without availabilityKey =? availabilityId.key, error:
|
|
||||||
return failure(error)
|
|
||||||
|
|
||||||
without var availability =? await self.get(availabilityKey, Availability), error:
|
|
||||||
return failure(error)
|
|
||||||
|
|
||||||
availability.freeSize += bytesToBeReturned
|
|
||||||
|
|
||||||
# Update availability with returned size
|
|
||||||
if updateErr =? (await self.updateAvailability(availability)).errorOption:
|
|
||||||
trace "Rolling back returning bytes"
|
|
||||||
if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
|
|
||||||
rollbackErr.parent = updateErr
|
|
||||||
return failure(rollbackErr)
|
|
||||||
|
|
||||||
return failure(updateErr)
|
|
||||||
|
|
||||||
return success()
|
|
||||||
|
|
||||||
proc release*(
|
proc release*(
|
||||||
self: Reservations,
|
self: Reservations,
|
||||||
reservationId: ReservationId,
|
reservationId: ReservationId,
|
||||||
availabilityId: AvailabilityId,
|
availabilityId: AvailabilityId,
|
||||||
bytes: uint,
|
bytes: uint,
|
||||||
): Future[?!void] {.async.} =
|
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||||
logScope:
|
logScope:
|
||||||
topics = "release"
|
topics = "release"
|
||||||
bytes
|
bytes
|
||||||
@ -563,13 +631,9 @@ proc release*(
|
|||||||
|
|
||||||
return success()
|
return success()
|
||||||
|
|
||||||
iterator items(self: StorableIter): Future[?seq[byte]] =
|
|
||||||
while not self.finished:
|
|
||||||
yield self.next()
|
|
||||||
|
|
||||||
proc storables(
|
proc storables(
|
||||||
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
||||||
): Future[?!StorableIter] {.async.} =
|
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
|
||||||
var iter = StorableIter()
|
var iter = StorableIter()
|
||||||
let query = Query.init(queryKey)
|
let query = Query.init(queryKey)
|
||||||
when T is Availability:
|
when T is Availability:
|
||||||
@ -587,7 +651,7 @@ proc storables(
|
|||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
# /sales/reservations
|
# /sales/reservations
|
||||||
proc next(): Future[?seq[byte]] {.async.} =
|
proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
|
||||||
await idleAsync()
|
await idleAsync()
|
||||||
iter.finished = results.finished
|
iter.finished = results.finished
|
||||||
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
|
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
|
||||||
@ -596,7 +660,7 @@ proc storables(
|
|||||||
|
|
||||||
return none seq[byte]
|
return none seq[byte]
|
||||||
|
|
||||||
proc dispose(): Future[?!void] {.async.} =
|
proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||||
return await results.dispose()
|
return await results.dispose()
|
||||||
|
|
||||||
iter.next = next
|
iter.next = next
|
||||||
@ -605,32 +669,40 @@ proc storables(
|
|||||||
|
|
||||||
proc allImpl(
|
proc allImpl(
|
||||||
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
||||||
): Future[?!seq[T]] {.async.} =
|
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||||
var ret: seq[T] = @[]
|
var ret: seq[T] = @[]
|
||||||
|
|
||||||
without storables =? (await self.storables(T, queryKey)), error:
|
without storables =? (await self.storables(T, queryKey)), error:
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
for storable in storables.items:
|
for storable in storables.items:
|
||||||
without bytes =? (await storable):
|
try:
|
||||||
continue
|
without bytes =? (await storable):
|
||||||
|
continue
|
||||||
|
|
||||||
without obj =? T.fromJson(bytes), error:
|
without obj =? T.fromJson(bytes), error:
|
||||||
error "json deserialization error",
|
error "json deserialization error",
|
||||||
json = string.fromBytes(bytes), error = error.msg
|
json = string.fromBytes(bytes), error = error.msg
|
||||||
continue
|
continue
|
||||||
|
|
||||||
ret.add obj
|
ret.add obj
|
||||||
|
except CancelledError as err:
|
||||||
|
raise err
|
||||||
|
except CatchableError as err:
|
||||||
|
error "Error when retrieving storable", error = err.msg
|
||||||
|
continue
|
||||||
|
|
||||||
return success(ret)
|
return success(ret)
|
||||||
|
|
||||||
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} =
|
proc all*(
|
||||||
|
self: Reservations, T: type SomeStorableObject
|
||||||
|
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||||
return await self.allImpl(T)
|
return await self.allImpl(T)
|
||||||
|
|
||||||
proc all*(
|
proc all*(
|
||||||
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
|
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
|
||||||
): Future[?!seq[T]] {.async.} =
|
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||||
without key =? (ReservationsKey / $availabilityId):
|
without key =? key(availabilityId):
|
||||||
return failure("no key")
|
   return failure("no key")

   return await self.allImpl(T, key)

@@ -639,18 +711,22 @@ proc findAvailability*(
     self: Reservations,
     size, duration: uint64,
     pricePerBytePerSecond, collateralPerByte: UInt256,
-): Future[?Availability] {.async.} =
+    validUntil: SecondsSince1970,
+): Future[?Availability] {.async: (raises: [CancelledError]).} =
   without storables =? (await self.storables(Availability)), e:
     error "failed to get all storables", error = e.msg
     return none Availability

   for item in storables.items:
     if bytes =? (await item) and availability =? Availability.fromJson(bytes):
-      if size <= availability.freeSize and duration <= availability.duration and
+      if availability.enabled and size <= availability.freeSize and
+          duration <= availability.duration and
           collateralPerByte <= availability.maxCollateralPerByte and
-          pricePerBytePerSecond >= availability.minPricePerBytePerSecond:
+          pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
+          (availability.until == 0 or availability.until >= validUntil):
         trace "availability matched",
           id = availability.id,
+          enabled = availability.enabled,
           size,
           availFreeSize = availability.freeSize,
           duration,
@@ -658,7 +734,8 @@ proc findAvailability*(
           pricePerBytePerSecond,
           availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
           collateralPerByte,
-          availMaxCollateralPerByte = availability.maxCollateralPerByte
+          availMaxCollateralPerByte = availability.maxCollateralPerByte,
+          until = availability.until

         # TODO: As soon as we're on ARC-ORC, we can use destructors
         # to automatically dispose our iterators when they fall out of scope.
@@ -670,6 +747,7 @@ proc findAvailability*(

       trace "availability did not match",
         id = availability.id,
+        enabled = availability.enabled,
         size,
         availFreeSize = availability.freeSize,
         duration,
@@ -677,4 +755,5 @@ proc findAvailability*(
        pricePerBytePerSecond,
        availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
        collateralPerByte,
-       availMaxCollateralPerByte = availability.maxCollateralPerByte
+       availMaxCollateralPerByte = availability.maxCollateralPerByte,
+       until = availability.until
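The new `validUntil` parameter threads the request's effective end time into availability matching (the caller in the preparing state passes `market.getRequestEnd`, see further below). A dependency-free sketch of the matching rule this hunk implements, with plain integers standing in for UInt256 and `until == 0` meaning "no deadline", as in the diff:

type Availability = object
  enabled: bool
  freeSize, duration: uint64
  minPricePerBytePerSecond, maxCollateralPerByte: uint64
  until: int64 # 0 means "no deadline"

proc matches(
    a: Availability,
    size, duration: uint64,
    pricePerBytePerSecond, collateralPerByte: uint64,
    validUntil: int64,
): bool =
  # An availability must be enabled, large and long enough, within the
  # collateral bound, priced at or above its minimum, and either have no
  # deadline or a deadline no earlier than the request's end.
  a.enabled and size <= a.freeSize and duration <= a.duration and
    collateralPerByte <= a.maxCollateralPerByte and
    pricePerBytePerSecond >= a.minPricePerBytePerSecond and
    (a.until == 0 or a.until >= validUntil)

when isMainModule:
  let a = Availability(
    enabled: true,
    freeSize: 100,
    duration: 60,
    minPricePerBytePerSecond: 1,
    maxCollateralPerByte: 5,
    until: 0,
  )
  doAssert a.matches(50, 30, 2, 3, validUntil = 1_700_000_000)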
@@ -2,7 +2,6 @@ import pkg/chronos
 import pkg/questionable
 import pkg/questionable/results
 import pkg/stint
-import pkg/upraises
 import ../contracts/requests
 import ../errors
 import ../logutils
@@ -11,6 +10,7 @@ import ./statemachine
 import ./salescontext
 import ./salesdata
 import ./reservations
+import ./slotqueue

 export reservations

@@ -26,10 +26,10 @@ type
     onCleanUp*: OnCleanUp
     onFilled*: ?OnFilled

-  OnCleanUp* = proc(
-    returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
-  ): Future[void] {.gcsafe, upraises: [].}
-  OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
+  OnCleanUp* = proc(reprocessSlot = false, returnedCollateral = UInt256.none) {.
+    async: (raises: [])
+  .}
+  OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

   SalesAgentError = object of CodexError
   AllSlotsFilledError* = object of SalesAgentError
@@ -42,10 +42,16 @@ proc newSalesAgent*(
     requestId: RequestId,
     slotIndex: uint64,
     request: ?StorageRequest,
+    slotQueueItem = SlotQueueItem.none,
 ): SalesAgent =
   var agent = SalesAgent.new()
   agent.context = context
-  agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request)
+  agent.data = SalesData(
+    requestId: requestId,
+    slotIndex: slotIndex,
+    request: request,
+    slotQueueItem: slotQueueItem,
+  )
   return agent

 proc retrieveRequest*(agent: SalesAgent) {.async.} =
@@ -103,18 +109,15 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
       error "Error while waiting for expiry to lapse", error = e.msgDetail

   data.cancelled = onCancelled()
-  asyncSpawn data.cancelled

 method onFulfilled*(
     agent: SalesAgent, requestId: RequestId
-) {.base, gcsafe, upraises: [].} =
+) {.base, gcsafe, raises: [].} =
   let cancelled = agent.data.cancelled
   if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
     cancelled.cancelSoon()

-method onFailed*(
-    agent: SalesAgent, requestId: RequestId
-) {.base, gcsafe, upraises: [].} =
+method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
   without request =? agent.data.request:
     return
   if agent.data.requestId == requestId:
@@ -122,7 +125,7 @@ method onFailed*(

 method onSlotFilled*(
     agent: SalesAgent, requestId: RequestId, slotIndex: uint64
-) {.base, gcsafe, upraises: [].} =
+) {.base, gcsafe, raises: [].} =
   if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
     agent.schedule(slotFilledEvent(requestId, slotIndex))

@@ -133,7 +136,7 @@ proc subscribe*(agent: SalesAgent) {.async.} =
   await agent.subscribeCancellation()
   agent.subscribed = true

-proc unsubscribe*(agent: SalesAgent) {.async.} =
+proc unsubscribe*(agent: SalesAgent) {.async: (raises: []).} =
   if not agent.subscribed:
     return

@@ -144,6 +147,6 @@ proc unsubscribe*(agent: SalesAgent) {.async.} =

   agent.subscribed = false

-proc stop*(agent: SalesAgent) {.async.} =
+proc stop*(agent: SalesAgent) {.async: (raises: []).} =
   await Machine(agent).stop()
   await agent.unsubscribe()
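`OnCleanUp` drops the `returnBytes` flag (bytes now come back through the reservation and collateral path) and becomes a chronos `async: (raises: [])` callback. A minimal sketch of the new shape with a matching implementation, assuming pkg/chronos and pkg/stint as used by this repo; the handler body is illustrative only:

import std/options
import pkg/chronos
import pkg/stint

type OnCleanUp = proc(reprocessSlot = false, returnedCollateral = UInt256.none) {.
  async: (raises: [])
.}

proc cleanUp(
    reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
  # What a host does with explicitly returned collateral is up to the
  # implementation; logging stands in for the real bookkeeping here.
  if returnedCollateral.isSome:
    echo "returning collateral: ", returnedCollateral.get

when isMainModule:
  let cb: OnCleanUp = cleanUp
  waitFor cb(reprocessSlot = false, returnedCollateral = some 5.u256)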
@@ -1,6 +1,5 @@
 import pkg/questionable
 import pkg/questionable/results
-import pkg/upraises
 import pkg/libp2p/cid

 import ../market
@@ -24,15 +23,20 @@ type
     slotQueue*: SlotQueue
     simulateProofFailures*: int

-  BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
+  BlocksCb* =
+    proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
   OnStore* = proc(
-    request: StorageRequest, slot: uint64, blocksCb: BlocksCb
-  ): Future[?!void] {.gcsafe, upraises: [].}
+    request: StorageRequest,
+    expiry: SecondsSince1970,
+    slot: uint64,
+    blocksCb: BlocksCb,
+    isRepairing: bool,
+  ): Future[?!void] {.async: (raises: [CancelledError]).}
   OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
-    gcsafe, upraises: []
+    async: (raises: [CancelledError])
   .}
   OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
-    gcsafe, upraises: []
+    async: (raises: [CancelledError])
   .}
-  OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
-  OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
+  OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
+  OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
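`OnStore` now receives the request's effective expiry and an `isRepairing` flag, and is a chronos-native async proc. A simplified handler against the new signature (the `blocksCb` parameter is omitted to keep the sketch self-contained, and `StorageRequest` here is a stand-in type), assuming pkg/chronos and pkg/questionable/results:

import pkg/chronos
import pkg/questionable/results

type
  SecondsSince1970 = int64
  StorageRequest = object
    id: string

proc onStore(
    request: StorageRequest,
    expiry: SecondsSince1970,
    slot: uint64,
    isRepairing: bool,
): Future[?!void] {.async: (raises: [CancelledError]).} =
  # The new contract: the caller supplies how long blocks must be kept and
  # whether the slot is being repaired, so storage can plan accordingly.
  if isRepairing:
    discard # e.g. fetch only the blocks that need repair
  return success()

when isMainModule:
  let req = StorageRequest(id: "example")
  doAssert (waitFor onStore(req, 1_700_000_000, 0'u64, false)).isOk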
@@ -2,6 +2,7 @@ import pkg/chronos
 import ../contracts/requests
 import ../market
 import ./reservations
+import ./slotqueue

 type SalesData* = ref object
   requestId*: RequestId
@@ -10,3 +11,4 @@ type SalesData* = ref object
   slotIndex*: uint64
   cancelled*: Future[void]
   reservation*: ?Reservation
+  slotQueueItem*: ?SlotQueueItem
@@ -3,9 +3,7 @@ import std/tables
 import pkg/chronos
 import pkg/questionable
 import pkg/questionable/results
-import pkg/upraises
 import ../errors
-import ../clock
 import ../logutils
 import ../rng
 import ../utils
@@ -17,25 +15,21 @@ logScope:
   topics = "marketplace slotqueue"

 type
-  OnProcessSlot* =
-    proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].}
+  OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}

   # Non-ref obj copies value when assigned, preventing accidental modification
   # of values which could cause an incorrect order (eg
   # ``slotQueue[1].collateral = 1`` would cause ``collateral`` to be updated,
   # but the heap invariant would no longer be honoured. When non-ref, the
   # compiler can ensure that statement will fail).
-  SlotQueueWorker = object
-    doneProcessing*: Future[void]
-
   SlotQueueItem* = object
     requestId: RequestId
     slotIndex: uint16
     slotSize: uint64
     duration: uint64
     pricePerBytePerSecond: UInt256
-    collateralPerByte: UInt256
-    expiry: uint64
+    collateral: UInt256 # Collateral computed
+    expiry: ?uint64
     seen: bool

   # don't need to -1 to prevent overflow when adding 1 (to always allow push)
@@ -47,7 +41,6 @@ type
     onProcessSlot: ?OnProcessSlot
     queue: AsyncHeapQueue[SlotQueueItem]
     running: bool
-    workers: AsyncQueue[SlotQueueWorker]
     trackedFutures: TrackedFutures
     unpaused: AsyncEvent

@@ -76,9 +69,6 @@ proc profitability(item: SlotQueueItem): UInt256 =
     slotSize: item.slotSize,
   ).pricePerSlot

-proc collateralPerSlot(item: SlotQueueItem): UInt256 =
-  StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot
-
 proc `<`*(a, b: SlotQueueItem): bool =
   # for A to have a higher priority than B (in a min queue), A must be less than
   # B.
@@ -95,11 +85,12 @@ proc `<`*(a, b: SlotQueueItem): bool =
   scoreA.addIf(a.profitability > b.profitability, 3)
   scoreB.addIf(a.profitability < b.profitability, 3)

-  scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2)
-  scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2)
+  scoreA.addIf(a.collateral < b.collateral, 2)
+  scoreB.addIf(a.collateral > b.collateral, 2)

-  scoreA.addIf(a.expiry > b.expiry, 1)
-  scoreB.addIf(a.expiry < b.expiry, 1)
+  if expiryA =? a.expiry and expiryB =? b.expiry:
+    scoreA.addIf(expiryA > expiryB, 1)
+    scoreB.addIf(expiryA < expiryB, 1)

   return scoreA > scoreB
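Queue priority now ranks by the precomputed `collateral` field, and expiry only breaks ties when both items carry one, since expiry became optional for items built from a bare `StorageRequest`. A self-contained model of the weighted comparison, with the same 3/2/1 weights as the diff and std/options standing in for questionable:

import std/options

proc addIf(score: var int, condition: bool, weight: int) =
  if condition:
    score += weight

type Item = object
  profitability: int
  collateral: int
  expiry: Option[int]

proc `<`(a, b: Item): bool =
  # Lower is higher priority in the min-queue: profitability weighs 3,
  # collateral 2, expiry 1, and expiry is only compared when both items
  # actually carry one.
  var scoreA, scoreB = 0
  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)
  scoreA.addIf(a.collateral < b.collateral, 2)
  scoreB.addIf(a.collateral > b.collateral, 2)
  if a.expiry.isSome and b.expiry.isSome:
    scoreA.addIf(a.expiry.get > b.expiry.get, 1)
    scoreB.addIf(a.expiry.get < b.expiry.get, 1)
  scoreA > scoreB

when isMainModule:
  let cheap = Item(profitability: 10, collateral: 1, expiry: some 100)
  let pricey = Item(profitability: 10, collateral: 9, expiry: some 100)
  doAssert cheap < pricey # equal profit: the lower collateral wins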
@@ -128,15 +119,13 @@ proc new*(
   # avoid instantiating `workers` in constructor to avoid side effects in
   # `newAsyncQueue` procedure

-proc init(_: type SlotQueueWorker): SlotQueueWorker =
-  SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
-
 proc init*(
     _: type SlotQueueItem,
     requestId: RequestId,
     slotIndex: uint16,
     ask: StorageAsk,
-    expiry: uint64,
+    expiry: ?uint64,
+    collateral: UInt256,
     seen = false,
 ): SlotQueueItem =
   SlotQueueItem(
@@ -145,25 +134,43 @@ proc init*(
     slotSize: ask.slotSize,
     duration: ask.duration,
     pricePerBytePerSecond: ask.pricePerBytePerSecond,
-    collateralPerByte: ask.collateralPerByte,
+    collateral: collateral,
     expiry: expiry,
     seen: seen,
   )

 proc init*(
-    _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16
+    _: type SlotQueueItem,
+    requestId: RequestId,
+    slotIndex: uint16,
+    ask: StorageAsk,
+    expiry: uint64,
+    collateral: UInt256,
+    seen = false,
 ): SlotQueueItem =
-  SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry)
+  SlotQueueItem.init(requestId, slotIndex, ask, some expiry, collateral, seen)

 proc init*(
-    _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64
-): seq[SlotQueueItem] =
+    _: type SlotQueueItem,
+    request: StorageRequest,
+    slotIndex: uint16,
+    collateral: UInt256,
+): SlotQueueItem =
+  SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)
+
+proc init*(
+    _: type SlotQueueItem,
+    requestId: RequestId,
+    ask: StorageAsk,
+    expiry: ?uint64,
+    collateral: UInt256,
+): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
   if not ask.slots.inRange:
     raise newException(SlotsOutOfRangeError, "Too many slots")

   var i = 0'u16
   proc initSlotQueueItem(): SlotQueueItem =
-    let item = SlotQueueItem.init(requestId, i, ask, expiry)
+    let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
     inc i
     return item

@@ -171,8 +178,19 @@ proc init*(
   Rng.instance.shuffle(items)
   return items

-proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] =
-  return SlotQueueItem.init(request.id, request.ask, request.expiry)
+proc init*(
+    _: type SlotQueueItem,
+    requestId: RequestId,
+    ask: StorageAsk,
+    expiry: uint64,
+    collateral: UInt256,
+): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
+  SlotQueueItem.init(requestId, ask, some expiry, collateral)
+
+proc init*(
+    _: type SlotQueueItem, request: StorageRequest, collateral: UInt256
+): seq[SlotQueueItem] =
+  return SlotQueueItem.init(request.id, request.ask, uint64.none, collateral)

 proc inRange*(val: SomeUnsignedInt): bool =
   val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
@@ -198,6 +216,9 @@ proc collateralPerByte*(self: SlotQueueItem): UInt256 =
 proc seen*(self: SlotQueueItem): bool =
   self.seen

+proc `seen=`*(self: var SlotQueueItem, seen: bool) =
+  self.seen = seen
+
 proc running*(self: SlotQueue): bool =
   self.running

@@ -216,13 +237,6 @@ proc `$`*(self: SlotQueue): string =
 proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
   self.onProcessSlot = some onProcessSlot

-proc activeWorkers*(self: SlotQueue): int =
-  if not self.running:
-    return 0
-
-  # active = capacity - available
-  self.maxWorkers - self.workers.len
-
 proc contains*(self: SlotQueue, item: SlotQueueItem): bool =
   self.queue.contains(item)

@@ -234,25 +248,7 @@ proc unpause*(self: SlotQueue) =
   # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
   self.unpaused.fire()

-proc populateItem*(
-    self: SlotQueue, requestId: RequestId, slotIndex: uint16
-): ?SlotQueueItem =
-  trace "populate item, items in queue", len = self.queue.len
-  for item in self.queue.items:
-    trace "populate item search", itemRequestId = item.requestId, requestId
-    if item.requestId == requestId:
-      return some SlotQueueItem(
-        requestId: requestId,
-        slotIndex: slotIndex,
-        slotSize: item.slotSize,
-        duration: item.duration,
-        pricePerBytePerSecond: item.pricePerBytePerSecond,
-        collateralPerByte: item.collateralPerByte,
-        expiry: item.expiry,
-      )
-  return none SlotQueueItem
-
-proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
+proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
   logScope:
     requestId = item.requestId
     slotIndex = item.slotIndex
@@ -324,52 +320,6 @@ proc delete*(self: SlotQueue, requestId: RequestId) =
 proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem =
   self.queue[i]

-proc addWorker(self: SlotQueue): ?!void =
-  if not self.running:
-    let err = newException(QueueNotRunningError, "queue must be running")
-    return failure(err)
-
-  trace "adding new worker to worker queue"
-
-  let worker = SlotQueueWorker.init()
-  try:
-    self.trackedFutures.track(worker.doneProcessing)
-    self.workers.addLastNoWait(worker)
-  except AsyncQueueFullError:
-    return failure("failed to add worker, worker queue full")
-
-  return success()
-
-proc dispatch(
-    self: SlotQueue, worker: SlotQueueWorker, item: SlotQueueItem
-) {.async: (raises: []).} =
-  logScope:
-    requestId = item.requestId
-    slotIndex = item.slotIndex
-
-  if not self.running:
-    warn "Could not dispatch worker because queue is not running"
-    return
-
-  if onProcessSlot =? self.onProcessSlot:
-    try:
-      self.trackedFutures.track(worker.doneProcessing)
-      await onProcessSlot(item, worker.doneProcessing)
-      await worker.doneProcessing
-
-      if err =? self.addWorker().errorOption:
-        raise err # catch below
-    except QueueNotRunningError as e:
-      info "could not re-add worker to worker queue, queue not running", error = e.msg
-    except CancelledError:
-      # do not bubble exception up as it is called with `asyncSpawn` which would
-      # convert the exception into a `FutureDefect`
-      discard
-    except CatchableError as e:
-      # we don't have any insight into types of errors that `onProcessSlot` can
-      # throw because it is caller-defined
-      warn "Unknown error processing slot in worker", error = e.msg
-
 proc clearSeenFlags*(self: SlotQueue) =
   # Enumerate all items in the queue, overwriting each item with `seen = false`.
   # To avoid issues with new queue items being pushed to the queue while all
@@ -387,7 +337,8 @@ proc clearSeenFlags*(self: SlotQueue) =

   trace "all 'seen' flags cleared"

-proc run(self: SlotQueue) {.async: (raises: []).} =
+proc runWorker(self: SlotQueue) {.async: (raises: []).} =
+  trace "slot queue worker loop started"
   while self.running:
     try:
       if self.paused:
@@ -396,8 +347,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
         # block until unpaused is true/fired, ie wait for queue to be unpaused
         await self.unpaused.wait()

-      let worker =
-        await self.workers.popFirst() # if workers saturated, wait here for new workers
       let item = await self.queue.pop() # if queue empty, wait here for new items

       logScope:
@@ -420,24 +369,19 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
         # immediately (with priority over other items) once unpaused
         trace "readding seen item back into the queue"
         discard self.push(item) # on error, drop the item and continue
-        worker.doneProcessing.complete()
-        if err =? self.addWorker().errorOption:
-          error "error adding new worker", error = err.msg
-        await sleepAsync(1.millis) # poll
         continue

       trace "processing item"
+      without onProcessSlot =? self.onProcessSlot:
+        raiseAssert "slot queue onProcessSlot not set"

-      let fut = self.dispatch(worker, item)
-      self.trackedFutures.track(fut)
-      asyncSpawn fut
-
-      await sleepAsync(1.millis) # poll
+      await onProcessSlot(item)
     except CancelledError:
-      trace "slot queue cancelled"
+      trace "slot queue worker cancelled"
       break
-    except CatchableError as e: # raised from self.queue.pop() or self.workers.pop()
-      warn "slot queue error encountered during processing", error = e.msg
+    except CatchableError as e: # raised from self.queue.pop()
+      warn "slot queue worker error encountered during processing", error = e.msg
+  trace "slot queue worker loop stopped"

 proc start*(self: SlotQueue) =
   if self.running:
@@ -447,18 +391,11 @@ proc start*(self: SlotQueue) =

   self.running = true

-  # must be called in `start` to avoid sideeffects in `new`
-  self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers)
-
   # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
   # task, a new worker will be pushed to the queue
   for i in 0 ..< self.maxWorkers:
-    if err =? self.addWorker().errorOption:
-      error "start: error adding new worker", error = err.msg
-
-  let fut = self.run()
-  self.trackedFutures.track(fut)
-  asyncSpawn fut
+    let worker = self.runWorker()
+    self.trackedFutures.track(worker)

 proc stop*(self: SlotQueue) {.async.} =
   if not self.running:
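The worker bookkeeping (`SlotQueueWorker`, `addWorker`, `dispatch`) is replaced by `maxWorkers` long-lived `runWorker` loops that pull items straight off the queue, so the polling `sleepAsync` calls disappear. A minimal sketch of that pattern, with a plain chronos `AsyncQueue` standing in for the `AsyncHeapQueue`:

import pkg/chronos

proc runWorker(queue: AsyncQueue[int]) {.async.} =
  # Long-lived loop: each worker blocks on the queue and processes items as
  # they arrive; concurrency is bounded by how many loops are spawned.
  while true:
    let item = await queue.popFirst() # waits while the queue is empty
    echo "processing ", item

when isMainModule:
  let queue = newAsyncQueue[int]()
  for _ in 0 ..< 3: # fixed-size worker pool, like maxWorkers
    asyncSpawn runWorker(queue)
  waitFor queue.addLast(1)
  waitFor queue.addLast(2)
  waitFor sleepAsync(10.millis) # give the workers a moment to drain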
@@ -1,5 +1,4 @@
 import pkg/questionable
-import pkg/upraises
 import ../errors
 import ../utils/asyncstatemachine
 import ../market
@@ -12,21 +11,21 @@ export asyncstatemachine

 type
   SaleState* = ref object of State
-  SaleError* = ref object of CodexError
+  SaleError* = object of CodexError

 method onCancelled*(
     state: SaleState, request: StorageRequest
-): ?State {.base, upraises: [].} =
+): ?State {.base, raises: [].} =
   discard

 method onFailed*(
     state: SaleState, request: StorageRequest
-): ?State {.base, upraises: [].} =
+): ?State {.base, raises: [].} =
   discard

 method onSlotFilled*(
     state: SaleState, requestId: RequestId, slotIndex: uint64
-): ?State {.base, upraises: [].} =
+): ?State {.base, raises: [].} =
   discard

 proc cancelledEvent*(request: StorageRequest): Event =
@@ -12,6 +12,14 @@ type SaleCancelled* = ref object of SaleState
 method `$`*(state: SaleCancelled): string =
   "SaleCancelled"

+proc slotIsFilledByMe(
+    market: Market, requestId: RequestId, slotIndex: uint64
+): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
+  let host = await market.getHost(requestId, slotIndex)
+  let me = await market.getSigner()
+
+  return host == me.some
+
 method run*(
     state: SaleCancelled, machine: Machine
 ): Future[?State] {.async: (raises: []).} =
@@ -23,21 +31,27 @@ method run*(
     raiseAssert "no sale request"

   try:
-    let slot = Slot(request: request, slotIndex: data.slotIndex)
-    debug "Collecting collateral and partial payout",
-      requestId = data.requestId, slotIndex = data.slotIndex
-    let currentCollateral = await market.currentCollateral(slot.id)
-    await market.freeSlot(slot.id)
+    var returnedCollateral = UInt256.none
+
+    if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
+      debug "Collecting collateral and partial payout",
+        requestId = data.requestId, slotIndex = data.slotIndex
+
+      let slot = Slot(request: request, slotIndex: data.slotIndex)
+      let currentCollateral = await market.currentCollateral(slot.id)
+
+      try:
+        await market.freeSlot(slot.id)
+      except SlotStateMismatchError as e:
+        warn "Failed to free slot because slot is already free", error = e.msg
+
+      returnedCollateral = currentCollateral.some

     if onClear =? agent.context.onClear and request =? data.request:
       onClear(request, data.slotIndex)

     if onCleanUp =? agent.onCleanUp:
-      await onCleanUp(
-        returnBytes = true,
-        reprocessSlot = false,
-        returnedCollateral = some currentCollateral,
-      )
+      await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)

     warn "Sale cancelled due to timeout",
       requestId = data.requestId, slotIndex = data.slotIndex
@@ -38,6 +38,7 @@ method run*(
   let agent = SalesAgent(machine)
   let data = agent.data
   let context = agent.context
+  let market = context.market
   let reservations = context.reservations

   without onStore =? context.onStore:
@@ -55,7 +56,9 @@ method run*(
     reservationId = reservation.id
     availabilityId = reservation.availabilityId

-  proc onBlocks(blocks: seq[bt.Block]): Future[?!void] {.async.} =
+  proc onBlocks(
+      blocks: seq[bt.Block]
+  ): Future[?!void] {.async: (raises: [CancelledError]).} =
     # release batches of blocks as they are written to disk and
     # update availability size
     var bytes: uint = 0
@@ -67,8 +70,21 @@ method run*(
     return await reservations.release(reservation.id, reservation.availabilityId, bytes)

   try:
+    let requestId = request.id
+    let slotId = slotId(requestId, data.slotIndex)
+    let requestState = await market.requestState(requestId)
+    let isRepairing = (await market.slotState(slotId)) == SlotState.Repair
+
+    trace "Retrieving expiry"
+    var expiry: SecondsSince1970
+    if state =? requestState and state == RequestState.Started:
+      expiry = await market.getRequestEnd(requestId)
+    else:
+      expiry = await market.requestExpiresAt(requestId)
+
     trace "Starting download"
-    if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption:
+    if err =?
+        (await onStore(request, expiry, data.slotIndex, onBlocks, isRepairing)).errorOption:
       return some State(SaleErrored(error: err, reprocessSlot: false))

     trace "Download complete"
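The download step now derives how long fetched blocks must be kept: until the request's expiry while it is still pending, but until the request's end once it has started. Restated as a tiny pure function (types simplified; the real code queries `market.requestExpiresAt` and `market.getRequestEnd`):

type RequestState = enum
  Pending
  Started

proc storeDeadline(state: RequestState, requestEnd, expiresAt: int64): int64 =
  # Blocks for a not-yet-started request only need to survive until the
  # request expires; once started, they must survive until it ends.
  if state == Started: requestEnd else: expiresAt

when isMainModule:
  doAssert storeDeadline(Pending, requestEnd = 2000, expiresAt = 1500) == 1500
  doAssert storeDeadline(Started, requestEnd = 2000, expiresAt = 1500) == 2000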
@@ -1,6 +1,5 @@
 import pkg/questionable
 import pkg/questionable/results
-import pkg/upraises

 import ../statemachine
 import ../salesagent
@@ -34,7 +33,7 @@ method run*(
       onClear(request, data.slotIndex)

     if onCleanUp =? agent.onCleanUp:
-      await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)
+      await onCleanUp(reprocessSlot = state.reprocessSlot)
   except CancelledError as e:
     trace "SaleErrored.run was cancelled", error = e.msgDetail
   except CatchableError as e:
@@ -28,6 +28,7 @@ method run*(
     let slot = Slot(request: request, slotIndex: data.slotIndex)
     debug "Removing slot from mySlots",
       requestId = data.requestId, slotIndex = data.slotIndex
+
     await market.freeSlot(slot.id)

     let error = newException(SaleFailedError, "Sale failed")
@@ -11,7 +11,7 @@ import ./cancelled
 import ./failed
 import ./proving

-when codex_enable_proof_failures:
+when storage_enable_proof_failures:
   import ./provingsimulated

 logScope:
@@ -59,7 +59,7 @@ method run*(
   if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
     return some State(SaleErrored(error: err))

-  when codex_enable_proof_failures:
+  when storage_enable_proof_failures:
     if context.simulateProofFailures > 0:
       info "Proving with failure rate", rate = context.simulateProofFailures
       return some State(
@@ -30,6 +30,7 @@ method run*(
 ): Future[?State] {.async: (raises: []).} =
   let data = SalesAgent(machine).data
   let market = SalesAgent(machine).context.market
+
   without (request =? data.request):
     raiseAssert "Request not set"

@@ -38,28 +39,20 @@ method run*(
     slotIndex = data.slotIndex

   try:
-    let slotState = await market.slotState(slotId(data.requestId, data.slotIndex))
-    let requestedCollateral = request.ask.collateralPerSlot
-    var collateral: UInt256
-
-    if slotState == SlotState.Repair:
-      # When repairing the node gets "discount" on the collateral that it needs to
-      let repairRewardPercentage = (await market.repairRewardPercentage).u256
-      collateral =
-        requestedCollateral -
-        ((requestedCollateral * repairRewardPercentage)).div(100.u256)
-    else:
-      collateral = requestedCollateral
+    without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
+      err:
+      error "Failure attempting to fill slot: unable to calculate collateral",
+        error = err.msg
+      return some State(SaleErrored(error: err))

     debug "Filling slot"
     try:
       await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
+    except SlotStateMismatchError as e:
+      debug "Slot is already filled, ignoring slot"
+      return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
     except MarketError as e:
-      if e.msg.contains "Slot is not free":
-        debug "Slot is already filled, ignoring slot"
-        return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
-      else:
-        return some State(SaleErrored(error: e))
+      return some State(SaleErrored(error: e))
     # other CatchableErrors are handled "automatically" by the SaleState

     return some State(SaleFilled())
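The inline repair discount is gone from the filling state; computing the collateral, including the repair-reward reduction the removed lines applied, is now assumed to live behind `market.slotCollateral`. For reference, the arithmetic the removed code performed, with uint64 standing in for UInt256:

proc discountedCollateral(requested, repairRewardPercentage: uint64): uint64 =
  # requested - requested * pct / 100: a repairing host posts collateral
  # reduced by the repair reward percentage.
  requested - (requested * repairRewardPercentage) div 100

when isMainModule:
  doAssert discountedCollateral(1000, 10) == 900
  doAssert discountedCollateral(1000, 0) == 1000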
@@ -36,6 +36,9 @@ method run*(
     requestId = data.requestId, slotIndex = data.slotIndex

   try:
+    if onClear =? agent.context.onClear:
+      onClear(request, data.slotIndex)
+
     if onCleanUp =? agent.onCleanUp:
       await onCleanUp(returnedCollateral = state.returnedCollateral)
   except CancelledError as e:
@@ -14,7 +14,7 @@ logScope:

 type SaleIgnored* = ref object of SaleState
   reprocessSlot*: bool # readd slot to queue with `seen` flag
-  returnBytes*: bool # return unreleased bytes from Reservation to Availability
+  returnsCollateral*: bool # returns collateral when a reservation was created

 method `$`*(state: SaleIgnored): string =
   "SaleIgnored"
@@ -23,11 +23,26 @@ method run*(
     state: SaleIgnored, machine: Machine
 ): Future[?State] {.async: (raises: []).} =
   let agent = SalesAgent(machine)
+  let data = agent.data
+  let market = agent.context.market
+
+  without request =? data.request:
+    raiseAssert "no sale request"
+
+  var returnedCollateral = UInt256.none

   try:
+    if state.returnsCollateral:
+      # The returnedCollateral is needed because a reservation could
+      # be created and the collateral assigned to that reservation.
+      # The returnedCollateral will be used in the cleanup function
+      # and be passed to the deleteReservation function.
+      let slot = Slot(request: request, slotIndex: data.slotIndex)
+      returnedCollateral = request.ask.collateralPerSlot.some
+
     if onCleanUp =? agent.onCleanUp:
       await onCleanUp(
-        reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes
+        reprocessSlot = state.reprocessSlot, returnedCollateral = returnedCollateral
       )
   except CancelledError as e:
     trace "SaleIgnored.run was cancelled", error = e.msgDetail
@@ -51,15 +51,17 @@ method run*(
   await agent.subscribe()

   without request =? data.request:
-    raiseAssert "no sale request"
+    error "request could not be retrieved", id = data.requestId
+    let error = newException(SaleError, "request could not be retrieved")
+    return some State(SaleErrored(error: error))

   let slotId = slotId(data.requestId, data.slotIndex)
   let state = await market.slotState(slotId)
   if state != SlotState.Free and state != SlotState.Repair:
-    return some State(SaleIgnored(reprocessSlot: false, returnBytes: false))
+    return some State(SaleIgnored(reprocessSlot: false))

   # TODO: Once implemented, check to ensure the host is allowed to fill the slot,
-  # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)
+  # due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)

   logScope:
     slotIndex = data.slotIndex
@@ -68,10 +70,12 @@ method run*(
     pricePerBytePerSecond = request.ask.pricePerBytePerSecond
     collateralPerByte = request.ask.collateralPerByte

+  let requestEnd = await market.getRequestEnd(data.requestId)
+
   without availability =?
     await reservations.findAvailability(
       request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
-      request.ask.collateralPerByte,
+      request.ask.collateralPerByte, requestEnd,
     ):
     debug "No availability found for request, ignoring"

@@ -80,9 +84,9 @@ method run*(
   info "Availability found for request, creating reservation"

   without reservation =?
-    await reservations.createReservation(
+    await noCancel reservations.createReservation(
       availability.id, request.ask.slotSize, request.id, data.slotIndex,
-      request.ask.collateralPerByte,
+      request.ask.collateralPerByte, requestEnd,
     ), error:
     trace "Creation of reservation failed"
     # Race condition:
@@ -1,5 +1,5 @@
 import ../../conf
-when codex_enable_proof_failures:
+when storage_enable_proof_failures:
   import std/strutils
   import pkg/stint
   import pkg/ethers
@@ -40,7 +40,7 @@ when codex_enable_proof_failures:
       try:
         warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
         await market.submitProof(slot.id, Groth16Proof.default)
-      except Proofs_InvalidProof as e:
+      except ProofInvalidError as e:
         discard # expected
       except CancelledError as error:
         raise error
@@ -44,12 +44,11 @@ method run*(
   try:
     trace "Reserving slot"
     await market.reserveSlot(data.requestId, data.slotIndex)
+  except SlotReservationNotAllowedError as e:
+    debug "Slot cannot be reserved, ignoring", error = e.msg
+    return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
   except MarketError as e:
-    if e.msg.contains "SlotReservations_ReservationNotAllowed":
-      debug "Slot cannot be reserved, ignoring", error = e.msg
-      return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
-    else:
-      return some State(SaleErrored(error: e))
+    return some State(SaleErrored(error: e))
   # other CatchableErrors are handled "automatically" by the SaleState

   trace "Slot successfully reserved"
@@ -58,7 +57,7 @@ method run*(
     # do not re-add this slot to the queue, and return bytes from Reservation to
     # the Availability
     debug "Slot cannot be reserved, ignoring"
-    return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
+    return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
   except CancelledError as e:
     trace "SaleSlotReserving.run was cancelled", error = e.msgDetail
   except CatchableError as e:
@@ -38,6 +38,11 @@ method run*(
   await agent.retrieveRequest()
   await agent.subscribe()

+  without request =? data.request:
+    error "request could not be retrieved", id = data.requestId
+    let error = newException(SaleError, "request could not be retrieved")
+    return some State(SaleErrored(error: error))
+
   let slotId = slotId(data.requestId, data.slotIndex)
   let slotState = await market.slotState(slotId)
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -113,17 +113,17 @@ func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural =

   self.numBlockCells * self.numSlotBlocks

-func slotIndiciesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
+func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
   ## Returns the slot indices.
   ##

-  self.strategy.getIndicies(slot).catch
+  self.strategy.getIndices(slot).catch

-func slotIndicies*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
+func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
   ## Returns the slot indices.
   ##

-  if iter =? self.strategy.getIndicies(slot).catch:
+  if iter =? self.strategy.getIndices(slot).catch:
     return toSeq(iter)

 func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
@@ -134,7 +134,7 @@ func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =

 proc buildBlockTree*[T, H](
     self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
-): Future[?!(seq[byte], T)] {.async.} =
+): Future[?!(seq[byte], T)] {.async: (raises: [CancelledError]).} =
   ## Build the block digest tree and return a tuple with the
   ## block data and the tree.
   ##
@@ -167,7 +167,7 @@ proc buildBlockTree*[T, H](

 proc getCellHashes*[T, H](
     self: SlotsBuilder[T, H], slotIndex: Natural
-): Future[?!seq[H]] {.async.} =
+): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} =
   ## Collect all the cells from a block and return
   ## their hashes.
   ##
@@ -184,7 +184,7 @@ proc getCellHashes*[T, H](
     slotIndex = slotIndex

   let hashes = collect(newSeq):
-    for i, blkIdx in self.strategy.getIndicies(slotIndex):
+    for i, blkIdx in self.strategy.getIndices(slotIndex):
       logScope:
         blkIdx = blkIdx
         pos = i
@@ -202,19 +202,23 @@ proc getCellHashes*[T, H](

 proc buildSlotTree*[T, H](
     self: SlotsBuilder[T, H], slotIndex: Natural
-): Future[?!T] {.async.} =
+): Future[?!T] {.async: (raises: [CancelledError]).} =
   ## Build the slot tree from the block digest hashes
   ## and return the tree.

-  without cellHashes =? (await self.getCellHashes(slotIndex)), err:
-    error "Failed to select slot blocks", err = err.msg
-    return failure(err)
+  try:
+    without cellHashes =? (await self.getCellHashes(slotIndex)), err:
+      error "Failed to select slot blocks", err = err.msg
+      return failure(err)

-  T.init(cellHashes)
+    T.init(cellHashes)
+  except IndexingError as err:
+    error "Failed to build slot tree", err = err.msg
+    return failure(err)

 proc buildSlot*[T, H](
     self: SlotsBuilder[T, H], slotIndex: Natural
-): Future[?!H] {.async.} =
+): Future[?!H] {.async: (raises: [CancelledError]).} =
   ## Build a slot tree and store the proofs in
   ## the block store.
   ##
@@ -250,7 +254,9 @@ proc buildSlot*[T, H](
 func buildVerifyTree*[T, H](self: SlotsBuilder[T, H], slotRoots: openArray[H]): ?!T =
   T.init(@slotRoots)

-proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} =
+proc buildSlots*[T, H](
+    self: SlotsBuilder[T, H]
+): Future[?!void] {.async: (raises: [CancelledError]).} =
   ## Build all slot trees and store them in the block store.
   ##

@@ -280,7 +286,9 @@ proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} =

   success()

-proc buildManifest*[T, H](self: SlotsBuilder[T, H]): Future[?!Manifest] {.async.} =
+proc buildManifest*[T, H](
+    self: SlotsBuilder[T, H]
+): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
   if err =? (await self.buildSlots()).errorOption:
     error "Failed to build slot roots", err = err.msg
     return failure(err)
@@ -302,7 +310,7 @@ proc new*[T, H](
     _: type SlotsBuilder[T, H],
     store: BlockStore,
     manifest: Manifest,
-    strategy = SteppedStrategy,
+    strategy = LinearStrategy,
     cellSize = DefaultCellSize,
 ): ?!SlotsBuilder[T, H] =
   if not manifest.protected:
@@ -315,13 +323,15 @@ proc new*[T, H](
     cellSize = cellSize

   if (manifest.blocksCount mod manifest.numSlots) != 0:
-    trace "Number of blocks must be divisable by number of slots."
-    return failure("Number of blocks must be divisable by number of slots.")
+    const msg = "Number of blocks must be divisible by number of slots."
+    trace msg
+    return failure(msg)

   let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
   if (manifest.blockSize mod cellSize) != 0.NBytes:
-    trace "Block size must be divisable by cell size."
-    return failure("Block size must be divisable by cell size.")
+    const msg = "Block size must be divisible by cell size."
+    trace msg
+    return failure(msg)

   let
     numSlotBlocks = manifest.numSlotBlocks
@@ -344,7 +354,14 @@ proc new*[T, H](
     emptyBlock = newSeq[byte](manifest.blockSize.int)
     emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)

-    strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch
+    strategy =
+      ?strategy.init(
+        0,
+        manifest.blocksCount - 1,
+        manifest.numSlots,
+        manifest.numSlots,
+        numPadSlotBlocks,
+      ).catch

   logScope:
     numSlotBlocks = numSlotBlocks
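The builder's default indexing strategy switches from stepped to linear, and the strategy is now initialised with explicit pad-block information. An illustrative, simplified model of the two block-selection orders; the real implementations live in indexingstrategy.nim and the parameter names here are assumptions:

iterator linearIndices(first, last, iterations, iteration: int): int =
  # Each slot takes one contiguous run of blocks.
  let step = (last - first + 1) div iterations
  for i in first + iteration * step ..< first + (iteration + 1) * step:
    yield i

iterator steppedIndices(first, last, iterations, iteration: int): int =
  # Each slot takes every `iterations`-th block, interleaved with the others.
  var i = first + iteration
  while i <= last:
    yield i
    i += iterations

when isMainModule:
  # 8 blocks split across 2 slots; slot 0 under each strategy:
  var linear, stepped: seq[int]
  for i in linearIndices(0, 7, 2, 0):
    linear.add i
  for i in steppedIndices(0, 7, 2, 0):
    stepped.add i
  doAssert linear == @[0, 1, 2, 3]
  doAssert stepped == @[0, 2, 4, 6]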
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2024 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2024 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2024 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2024 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -38,7 +38,9 @@ type
   AnyProof* = CircomProof

   AnySampler* = Poseidon2Sampler
+    # add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
   AnyBuilder* = Poseidon2Builder
+    # add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder

   AnyProofInputs* = ProofInputs[Poseidon2Hash]
   Prover* = ref object of RootObj
@@ -48,7 +50,7 @@ type

 proc prove*(
     self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge
-): Future[?!(AnyProofInputs, AnyProof)] {.async.} =
+): Future[?!(AnyProofInputs, AnyProof)] {.async: (raises: [CancelledError]).} =
   ## Prove a statement using backend.
   ## Returns a future that resolves to a proof.

@@ -80,7 +82,7 @@ proc prove*(

 proc verify*(
     self: Prover, proof: AnyProof, inputs: AnyProofInputs
-): Future[?!bool] {.async.} =
+): Future[?!bool] {.async: (raises: [CancelledError]).} =
   ## Prove a statement using backend.
   ## Returns a future that resolves to a proof.
   self.backend.verify(proof, inputs)
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -48,12 +48,12 @@ func getCell*[T, H](

 proc getSample*[T, H](
     self: DataSampler[T, H], cellIdx: int, slotTreeCid: Cid, slotRoot: H
-): Future[?!Sample[H]] {.async.} =
+): Future[?!Sample[H]] {.async: (raises: [CancelledError]).} =
   let
     cellsPerBlock = self.builder.numBlockCells
     blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index
     blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index
-    origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx]
+    origBlockIdx = self.builder.slotIndices(self.index)[blkSlotIdx]
       # convert to original dataset block index

   logScope:
@@ -81,7 +81,7 @@ proc getSample*[T, H](

 proc getProofInput*[T, H](
     self: DataSampler[T, H], entropy: ProofChallenge, nSamples: Natural
-): Future[?!ProofInputs[H]] {.async.} =
+): Future[?!ProofInputs[H]] {.async: (raises: [CancelledError]).} =
   ## Generate proofs as input to the proving circuit.
   ##