Compare commits

...

48 Commits

Author SHA1 Message Date
gmega
a1631569d1
Revert "fix: remove cirdl from build.nims, remove marketplace from docker entrypoint"
This reverts commit 14e8fa749c8abce498e20ddea9ab5e0d371dbc51.
2026-01-16 16:50:47 -03:00
gmega
14e8fa749c
fix: remove cirdl from build.nims, remove marketplace from docker entrypoint 2026-01-16 16:48:46 -03:00
gmega
fdad2200f4
fix: remove cirdl from build; stop installing Rust; update Dockerfile; delete prover benchmarks 2026-01-16 16:42:57 -03:00
Eric
2a1a548341
refactor!: remove unused modules (#1362)
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
2026-01-16 19:03:54 +00:00
Giuliano Mega
e894fb03fa
feat: primitives for sampling with and without replacement (#1125) 2026-01-16 13:49:04 -03:00
Arnaud
da70ebff7c
chore: improve c bindings (#1361)
Signed-off-by: Arnaud <arnaud@status.im>
2026-01-16 14:33:34 +00:00
Arnaud
2073090ed7
fix: close the discovery store (#1364) 2026-01-16 13:10:30 +00:00
Eric
1acedcf71c
fix(ci): introduce a number of integration test fixes (#1342)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Arnaud <arnaud@status.im>
Co-authored-by: gmega <giuliano.mega@gmail.com>
2026-01-16 10:47:59 +00:00
Jacek Sieka
cce002fcbf
feat: async tree building v2 (#1360)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: munna0908 <munnasitu0908@gmail.com>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2026-01-15 18:13:14 +00:00
Jakub
50bd183984
chore(ci): use container, add timeout, tmp cleanup (#1369)
Signed-off-by: Jakub Sokołowski <jakub@status.im>
2026-01-13 17:42:52 +00:00
Arnaud
e59e98ba92
chore: update nix config (#1363) 2026-01-12 12:23:32 +00:00
Arnaud
60861d6af8
chore: rename codex to logos storage (#1359) 2025-12-18 17:23:09 +00:00
Eric
49e801803f
ci: remove dist tests and devnet deployment (#1338) 2025-12-17 06:03:59 +00:00
Jacek Sieka
858101c74c
chore: bump eth & networking (#1353) 2025-12-15 10:00:51 +00:00
Jacek Sieka
bd49591fff
chore: bump *-serialization (#1352) 2025-12-12 08:03:56 +00:00
Jacek Sieka
6765beee2c
chore: assorted bumps (#1351) 2025-12-11 21:03:36 +00:00
Jacek Sieka
45fec4b524
chore: bump libbacktrace (#1349) 2025-12-11 20:42:53 +00:00
Jacek Sieka
9ac9f6ff3c
chore: drop usage of upraises (#1348) 2025-12-11 09:03:43 +00:00
Arnaud
bd36032251
feat: add c binding (#1322)
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 07:34:09 +00:00
Chrysostomos Nanakos
be759baf4d
feat: Block exchange optimizations (#1325)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 05:47:02 +00:00
Eric
6147a751f1
fix(ci): Remove macos amd release build (#1337) 2025-11-13 05:37:43 +00:00
Eric
ee47ca8760
feat(libs): Use libp2p multiformats extensions instead of a rolling branch (#1329) 2025-11-13 04:48:33 +00:00
Eric
f791a960f2
fix(ci): Windows SIGILL in CI (#1339) 2025-11-03 11:45:02 +00:00
Arnaud
db8f866db4
feat: check if CID exists in local store (#1331) 2025-11-02 04:32:47 +00:00
Eric
7aca2f0e61
fix(ci): Move conventional commits job to workflow (#1340) 2025-11-02 04:00:55 +00:00
Eric
072bff5cab
fix: ci integration tests (#1335) 2025-10-30 19:38:11 +11:00
Arnaud
af55a761e6
chore: skip marketplace and long integration tests (#1326) 2025-10-22 19:22:33 +11:00
Adam Uhlíř
e3d8d195c3
chore: update nim-libp2p (#1323) 2025-10-01 13:19:15 +02:00
Slava
d1f2e2399b
ci: validate pr title to adhere conventional commits (#1254) 2025-08-12 08:51:41 +00:00
Slava
8cd10edb69
ci: auto deploy codex on devnet (#1302) 2025-07-28 10:02:19 +00:00
Slava
6cf99e255c
ci: release master builds and upload them to the cloud (#1298) 2025-07-10 11:17:11 +00:00
Dmitriy Ryajov
7eb2fb12cc
make default dirs runtime, not compile time. (#1292) 2025-06-26 18:44:24 +00:00
Slava
352273ff81
chore: bump codex-contracts-eth (#1293) 2025-06-26 18:09:48 +00:00
Slava
9ef9258720
chore(ci): bump node to v22 (#1285) 2025-06-26 01:11:00 +00:00
markspanbroek
7927afe715
chore: update nph dependency (#1279)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-25 10:30:48 +00:00
markspanbroek
01615354af
refactor(ci): run integration tests in parallel by spinning up more runners (#1287) 2025-06-25 08:56:16 +00:00
Chrysostomos Nanakos
baff902137
fix: resolve shared block request cancellation conflicts (#1284) 2025-06-24 15:05:25 +00:00
markspanbroek
4d44154a40
fix(ci): remove "update" to gcc-14 on windows (#1288) 2025-06-24 09:00:56 +00:00
markspanbroek
e1c397e112
fix(tests): auto import all tests files and fix forgotten tests (#1281) 2025-06-23 11:18:59 +00:00
Arnaud
7b660e3554
chore(marketplace): use hardhat ignition (#1195) 2025-06-20 15:55:00 +00:00
Arnaud
c5e424ff1b
feat(marketplace) - add status l2 (Linea) network (#1160) 2025-06-20 12:30:40 +00:00
Slava
36f64ad3e6
chore: update testnet marketplace address (#1283) 2025-06-20 06:13:58 +00:00
Ben Bierens
235c0ec842
chore: updates codex-contracts-eth submodule (#1278)
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2025-06-19 10:31:52 +00:00
Arnaud
d443df441d
chore: improve marketplace integration tests (#1268) 2025-06-19 06:36:10 +00:00
Arnaud
e35aec7870
chore: increase gas limits (#1272) 2025-06-18 12:18:56 +00:00
Slava
93e4e0f177
ci(docker): add stable tag for dist-tests images (#1273) 2025-06-16 16:22:09 +00:00
Slava
6db6bf5f72
feat(docker): adjust entrypoint (#1271)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-14 04:25:29 +00:00
Chrysostomos Nanakos
b305e00160
Add support for slot reconstruction on unavailable slot detection (#1235)
Co-authored-by: Arnaud <arnaud@status.im>
2025-06-12 22:19:42 +00:00
380 changed files with 8418 additions and 24059 deletions

View File

@ -12,9 +12,6 @@ inputs:
nim_version: nim_version:
description: "Nim version" description: "Nim version"
default: "v2.0.14" default: "v2.0.14"
rust_version:
description: "Rust version"
default: "1.79.0"
shell: shell:
description: "Shell to run commands in" description: "Shell to run commands in"
default: "bash --noprofile --norc -e -o pipefail" default: "bash --noprofile --norc -e -o pipefail"
@ -24,12 +21,6 @@ inputs:
runs: runs:
using: "composite" using: "composite"
steps: steps:
- name: Rust (Linux)
if: inputs.os == 'linux'
shell: ${{ inputs.shell }} {0}
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${{ inputs.rust_version }} -y
- name: APT (Linux amd64/arm64) - name: APT (Linux amd64/arm64)
if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64') if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
shell: ${{ inputs.shell }} {0} shell: ${{ inputs.shell }} {0}
@ -65,7 +56,6 @@ runs:
mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-toolchain
mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-cmake
mingw-w64-ucrt-x86_64-ntldd-git mingw-w64-ucrt-x86_64-ntldd-git
mingw-w64-ucrt-x86_64-rust
- name: MSYS2 (Windows i386) - name: MSYS2 (Windows i386)
if: inputs.os == 'windows' && inputs.cpu == 'i386' if: inputs.os == 'windows' && inputs.cpu == 'i386'
@ -79,17 +69,10 @@ runs:
mingw-w64-i686-toolchain mingw-w64-i686-toolchain
mingw-w64-i686-cmake mingw-w64-i686-cmake
mingw-w64-i686-ntldd-git mingw-w64-i686-ntldd-git
mingw-w64-i686-rust
- name: MSYS2 (Windows All) - Update to gcc 14
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst
- name: Install gcc 14 on Linux - name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04) # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }} if: ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
shell: ${{ inputs.shell }} {0} shell: ${{ inputs.shell }} {0}
run: | run: |
# Skip for older Ubuntu versions # Skip for older Ubuntu versions
@ -107,15 +90,22 @@ runs:
if: inputs.os == 'linux' || inputs.os == 'macos' if: inputs.os == 'linux' || inputs.os == 'macos'
uses: hendrikmuhs/ccache-action@v1.2 uses: hendrikmuhs/ccache-action@v1.2
with: with:
create-symlink: true create-symlink: false
key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }} key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
evict-old-files: 7d evict-old-files: 7d
- name: Add ccache to path on Linux/Mac
if: inputs.os == 'linux' || inputs.os == 'macos'
shell: ${{ inputs.shell }} {0}
run: |
echo "/usr/lib/ccache:/usr/local/opt/ccache/libexec" >> "$GITHUB_PATH"
echo "/usr/local/opt/ccache/libexec" >> "$GITHUB_PATH"
- name: Install ccache on Windows - name: Install ccache on Windows
if: inputs.os == 'windows' if: inputs.os == 'windows'
uses: hendrikmuhs/ccache-action@v1.2 uses: hendrikmuhs/ccache-action@v1.2
with: with:
key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }} key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
evict-old-files: 7d evict-old-files: 7d
- name: Enable ccache on Windows - name: Enable ccache on Windows
@ -123,11 +113,11 @@ runs:
shell: ${{ inputs.shell }} {0} shell: ${{ inputs.shell }} {0}
run: | run: |
CCACHE_DIR=$(dirname $(which ccache))/ccached CCACHE_DIR=$(dirname $(which ccache))/ccached
mkdir ${CCACHE_DIR} mkdir -p ${CCACHE_DIR}
ln -s $(which ccache) ${CCACHE_DIR}/gcc.exe ln -sf $(which ccache) ${CCACHE_DIR}/gcc.exe
ln -s $(which ccache) ${CCACHE_DIR}/g++.exe ln -sf $(which ccache) ${CCACHE_DIR}/g++.exe
ln -s $(which ccache) ${CCACHE_DIR}/cc.exe ln -sf $(which ccache) ${CCACHE_DIR}/cc.exe
ln -s $(which ccache) ${CCACHE_DIR}/c++.exe ln -sf $(which ccache) ${CCACHE_DIR}/c++.exe
echo "export PATH=${CCACHE_DIR}:\$PATH" >> $HOME/.bash_profile # prefix path in MSYS2 echo "export PATH=${CCACHE_DIR}:\$PATH" >> $HOME/.bash_profile # prefix path in MSYS2
- name: Derive environment variables - name: Derive environment variables
@ -208,10 +198,10 @@ runs:
- name: Restore Nim toolchain binaries from cache - name: Restore Nim toolchain binaries from cache
id: nim-cache id: nim-cache
uses: actions/cache@v4 uses: actions/cache@v4
if : ${{ inputs.coverage != 'true' }} if: ${{ inputs.coverage != 'true' }}
with: with:
path: NimBinaries path: NimBinaries
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }} key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }} restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}
- name: Set NIM_COMMIT - name: Set NIM_COMMIT
@ -224,7 +214,7 @@ runs:
run: | run: |
git config --global core.symlinks false git config --global core.symlinks false
- name: Build Nim and Codex dependencies - name: Build Nim and Logos Storage dependencies
shell: ${{ inputs.shell }} {0} shell: ${{ inputs.shell }} {0}
run: | run: |
which gcc which gcc

View File

@ -3,12 +3,14 @@ Tips for shorter build times
### Runner availability ### ### Runner availability ###
Currently, the biggest bottleneck when optimizing workflows is the availability When running on the Github free, pro or team plan, the bottleneck when
of Windows and macOS runners. Therefore, anything that reduces the time spent in optimizing workflows is the availability of macOS runners. Therefore, anything
Windows or macOS jobs will have a positive impact on the time waiting for that reduces the time spent in macOS jobs will have a positive impact on the
runners to become available. The usage limits for Github Actions are [described time waiting for runners to become available. On the Github enterprise plan,
here][limits]. You can see a breakdown of runner usage for your jobs in the this is not the case and you can more freely use parallelization on multiple
Github Actions tab ([example][usage]). runners. The usage limits for Github Actions are [described here][limits]. You
can see a breakdown of runner usage for your jobs in the Github Actions tab
([example][usage]).
### Windows is slow ### ### Windows is slow ###
@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.
Breaking up a long build job into several jobs that you run in parallel can have Breaking up a long build job into several jobs that you run in parallel can have
a positive impact on the wall clock time that a workflow runs. For instance, you a positive impact on the wall clock time that a workflow runs. For instance, you
might consider running unit tests and integration tests in parallel. Keep in might consider running unit tests and integration tests in parallel. When
mind however that availability of macOS and Windows runners is the biggest running on the Github free, pro or team plan, keep in mind that availability of
bottleneck. If you split a Windows job into two jobs, you now need to wait for macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
two Windows runners to become available! Therefore parallelization often only need to wait for two macOS runners to become available.
makes sense for Linux jobs.
### Refactoring ### ### Refactoring ###
@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
to know whether you introduced a failure on all platforms, or only on a single to know whether you introduced a failure on all platforms, or only on a single
one. You might be tempted to disable fail-fast, but keep in mind that this keeps one. You might be tempted to disable fail-fast, but keep in mind that this keeps
runners busy for longer on a workflow that you know is going to fail anyway. runners busy for longer on a workflow that you know is going to fail anyway.
Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed. Consequent runs will therefore take longer to start. Fail fast is most likely
better for overall development speed.
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage [usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action [composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows [reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache [cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache

118
.github/workflows/artifacts.yml vendored Normal file
View File

@ -0,0 +1,118 @@
name: Libstorage artifacts
on:
push:
tags:
- "v*"
workflow_dispatch:
jobs:
build:
runs-on: ${{ matrix.target.os }}
strategy:
matrix:
target:
- os: ubuntu-latest
cpu: amd64
lib_ext: so
- os: ubuntu-24.04-arm
cpu: arm64
lib_ext: so
- os: macos-latest
lib_ext: dylib
cpu: arm64
- os: windows-latest
cpu: amd64
lib_ext: dll
steps:
- name: Check out sources
uses: actions/checkout@v4
with:
submodules: recursive
- name: MSYS2 (Windows amd64)
if: matrix.target.os == 'windows-latest' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: UCRT64
install: >-
base-devel
git
mingw-w64-ucrt-x86_64-toolchain
mingw-w64-ucrt-x86_64-cmake
mingw-w64-ucrt-x86_64-ntldd-git
run: |
pacman -Sy --noconfirm make
git config --global core.symlinks false
- name: Build libstorage (Linux)
if: matrix.target.lib_ext == 'so'
run: |
make -j${ncpu} update
make -j${ncpu} libstorage
- name: Build libstorage (MacOS)
if: matrix.target.os == 'macos-latest'
run: |
make -j${ncpu} update
STORAGE_LIB_PARAMS="--passL:\"-Wl,-install_name,@rpath/libstorage.dylib\"" make -j${ncpu} libstorage
- name: Build libstorage (Windows)
if: matrix.target.os == 'windows-latest'
shell: msys2 {0}
run: |
make -j${ncpu} update
make -j${ncpu} libstorage
- name: Package artifacts Linux
if: matrix.target.os == 'ubuntu-latest' || matrix.target.os == 'ubuntu-24.04-arm'
run: |
sudo apt-get update && sudo apt-get install -y zip
ZIPFILE=storage-linux-${{ matrix.target.cpu }}.zip
zip -j $ZIPFILE ./build/libstorage.${{ matrix.target.lib_ext }} ./library/libstorage.h
echo "ZIPFILE=$ZIPFILE" >> $GITHUB_ENV
- name: Package artifacts MacOS
if: matrix.target.os == 'macos-latest'
run: |
ZIPFILE=storage-macos-${{ matrix.target.cpu }}.zip
zip -j $ZIPFILE ./build/libstorage.${{ matrix.target.lib_ext }} ./library/libstorage.h
echo "ZIPFILE=$ZIPFILE" >> $GITHUB_ENV
- name: Package artifacts (Windows)
if: matrix.target.os == 'windows-latest'
shell: msys2 {0}
run: |
ZIPFILE=storage-windows-${{ matrix.target.cpu }}.zip
(cd ./build && 7z a -tzip "${GITHUB_WORKSPACE}/${ZIPFILE}" libstorage.dll)
(cd ./library && 7z a -tzip "${GITHUB_WORKSPACE}/${ZIPFILE}" libstorage.h)
echo "ZIPFILE=$ZIPFILE" >> $GITHUB_ENV
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ env.ZIPFILE }}
path: ${{ env.ZIPFILE }}
if-no-files-found: error
publish-release:
needs: build
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Check out sources
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v5
with:
path: dist
- name: Create release
uses: softprops/action-gh-release@v2
with:
files: dist/**
draft: true

View File

@ -24,9 +24,9 @@ jobs:
run: run:
shell: ${{ matrix.shell }} {0} shell: ${{ matrix.shell }} {0}
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }} name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
runs-on: ${{ matrix.builder }} runs-on: ${{ matrix.builder }}
timeout-minutes: 120 timeout-minutes: 90
steps: steps:
- name: Checkout sources - name: Checkout sources
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -48,43 +48,21 @@ jobs:
if: matrix.tests == 'unittest' || matrix.tests == 'all' if: matrix.tests == 'unittest' || matrix.tests == 'all'
run: make -j${ncpu} test run: make -j${ncpu} test
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Start Ethereum node with Codex contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/codex-contracts-eth
env:
MSYS2_PATH_TYPE: inherit
run: |
npm install
npm start &
## Part 2 Tests ## ## Part 2 Tests ##
- name: Contract tests
if: matrix.tests == 'contract' || matrix.tests == 'all'
run: make -j${ncpu} testContracts
## Part 3 Tests ##
- name: Integration tests - name: Integration tests
if: matrix.tests == 'integration' || matrix.tests == 'all' if: matrix.tests == 'integration' || matrix.tests == 'all'
run: make -j${ncpu} testIntegration env:
CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
run: make -j${ncpu} DEBUG=${{ runner.debug }} testIntegration
- name: Upload integration tests log files - name: Upload integration tests log files
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
if: (matrix.tests == 'integration' || matrix.tests == 'all') && always() if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
with: with:
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
path: tests/integration/logs/ path: tests/integration/logs/
retention-days: 1 retention-days: 1
## Part 4 Tools ##
- name: Tools tests
if: matrix.tests == 'tools' || matrix.tests == 'all'
run: make -j${ncpu} testTools
status: status:
if: always() if: always()
needs: [build] needs: [build]

View File

@ -16,29 +16,22 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
matrix: matrix:
name: Compute matrix
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
matrix: ${{ steps.matrix.outputs.matrix }} matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }} cache_nonce: ${{ env.cache_nonce }}
steps: steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix - name: Compute matrix
id: matrix id: matrix
uses: fabiocaccamo/create-matrix-action@v5 run: |
with: echo 'matrix<<EOF' >> $GITHUB_OUTPUT
matrix: | tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} echo 'EOF' >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
build: build:
needs: matrix needs: matrix
@ -92,3 +85,29 @@ jobs:
name: codecov-umbrella name: codecov-umbrella
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}
verbose: true verbose: true
cbinding:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ github.event.pull_request.head.sha }}
- name: Setup Nimbus Build System
uses: ./.github/actions/nimbus-build-system
with:
os: linux
nim_version: ${{ env.nim_version }}
- name: C Binding build
run: |
make -j${ncpu} update
make -j${ncpu} libstorage
- name: C Binding test
run: |
cd examples/c
gcc -o storage storage.c -L../../build -lstorage -Wl,-rpath,../../ -pthread
LD_LIBRARY_PATH=../../build ./storage

View File

@ -0,0 +1,19 @@
name: Conventional Commits Linting
on:
push:
branches:
- master
pull_request:
workflow_dispatch:
merge_group:
jobs:
pr-title:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/pr-conventional-commits@1.4.1
with:
task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'

View File

@ -1,54 +0,0 @@
name: Docker - Dist-Tests
on:
push:
branches:
- master
tags:
- 'v*.*.*'
paths-ignore:
- '**/*.md'
- '.gitignore'
- '.github/**'
- '!.github/workflows/docker-dist-tests.yml'
- '!.github/workflows/docker-reusable.yml'
- 'docker/**'
- '!docker/codex.Dockerfile'
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
inputs:
run_release_tests:
description: Run Release tests
required: false
type: boolean
default: false
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
nat_ip_auto: true
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_suffix: dist-tests
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
run_release_tests: ${{ inputs.run_release_tests }}
secrets: inherit

View File

@ -68,6 +68,10 @@ on:
description: Specifies compatible smart contract image description: Specifies compatible smart contract image
required: false required: false
type: string type: string
outputs:
codex_image:
description: Logos Storage Docker image tag
value: ${{ jobs.publish.outputs.codex_image }}
env: env:
@ -83,7 +87,7 @@ env:
TAG_SUFFIX: ${{ inputs.tag_suffix }} TAG_SUFFIX: ${{ inputs.tag_suffix }}
CONTRACT_IMAGE: ${{ inputs.contract_image }} CONTRACT_IMAGE: ${{ inputs.contract_image }}
# Tests # Tests
TESTS_SOURCE: codex-storage/cs-codex-dist-tests TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
TESTS_BRANCH: master TESTS_BRANCH: master
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }} CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }} CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@ -91,15 +95,16 @@ env:
jobs: jobs:
# Compute variables
compute: compute:
name: Compute build ID name: Compute build ID
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
build_id: ${{ steps.build_id.outputs.build_id }} build_id: ${{ steps.build_id.outputs.build_id }}
steps: steps:
- name: Generate unique build id - name: Generate unique build id
id: build_id id: build_id
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
# Build platform specific image # Build platform specific image
build: build:
@ -134,7 +139,7 @@ jobs:
run: | run: |
# Create contract label for compatible contract image if specified # Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi fi
- name: Docker - Meta - name: Docker - Meta
@ -189,35 +194,35 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
version: ${{ steps.meta.outputs.version }} version: ${{ steps.meta.outputs.version }}
codex_image: ${{ steps.image_tag.outputs.codex_image }}
needs: [build, compute] needs: [build, compute]
steps: steps:
- name: Docker - Variables - name: Docker - Variables
run: | run: |
# Adjust custom suffix when set and # Adjust custom suffix when set
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi fi
# Disable SHA tags on tagged release # Disable SHA tags on tagged release
if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
echo "TAG_SHA=false" >>$GITHUB_ENV echo "TAG_SHA=false" >> $GITHUB_ENV
fi fi
# Handle latest and latest-custom using raw # Handle latest and latest-custom using raw
if [[ ${{ env.TAG_SHA }} == "false" ]]; then if [[ ${{ env.TAG_SHA }} == "false" ]]; then
echo "TAG_LATEST=false" >>$GITHUB_ENV echo "TAG_LATEST=false" >> $GITHUB_ENV
echo "TAG_RAW=true" >>$GITHUB_ENV echo "TAG_RAW=true" >> $GITHUB_ENV
if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
else else
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi fi
else else
echo "TAG_RAW=false" >>$GITHUB_ENV echo "TAG_RAW=false" >> $GITHUB_ENV
fi fi
# Create contract label for compatible contract image if specified # Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi fi
- name: Docker - Download digests - name: Docker - Download digests
@ -257,9 +262,12 @@ jobs:
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *) $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
- name: Docker - Image tag
id: image_tag
run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
- name: Docker - Inspect image - name: Docker - Inspect image
run: | run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
# Compute Tests inputs # Compute Tests inputs
@ -308,7 +316,7 @@ jobs:
max-parallel: 1 max-parallel: 1
matrix: matrix:
tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }} tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
with: with:
source: ${{ needs.compute-tests-inputs.outputs.source }} source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }} branch: ${{ needs.compute-tests-inputs.outputs.branch }}
@ -325,7 +333,7 @@ jobs:
name: Run Release Tests name: Run Release Tests
needs: [compute-tests-inputs] needs: [compute-tests-inputs]
if: ${{ inputs.run_release_tests == 'true' }} if: ${{ inputs.run_release_tests == 'true' }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
with: with:
source: ${{ needs.compute-tests-inputs.outputs.source }} source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }} branch: ${{ needs.compute-tests-inputs.outputs.branch }}

View File

@ -19,26 +19,10 @@ on:
workflow_dispatch: workflow_dispatch:
jobs: jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push: build-and-push:
name: Build and Push name: Build and Push
uses: ./.github/workflows/docker-reusable.yml uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with: with:
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }} tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }} tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
secrets: inherit secrets: inherit

View File

@ -52,7 +52,7 @@ jobs:
node-version: 18 node-version: 18
- name: Build OpenAPI - name: Build OpenAPI
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API" run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
- name: Build Postman Collection - name: Build Postman Collection
run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false

View File

@ -8,22 +8,21 @@ env:
cache_nonce: 0 # Allows for easily busting actions/cache caches cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned nim_version: pinned
jobs: jobs:
matrix: matrix:
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
matrix: ${{ steps.matrix.outputs.matrix }} matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }} cache_nonce: ${{ env.cache_nonce }}
steps: steps:
- name: Compute matrix - name: Checkout sources
id: matrix uses: actions/checkout@v4
uses: fabiocaccamo/create-matrix-action@v5 - name: Compute matrix
with: id: matrix
matrix: | run: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} echo 'matrix<<EOF' >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} echo 'EOF' >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
build: build:
needs: matrix needs: matrix

View File

@ -4,14 +4,14 @@ on:
push: push:
tags: tags:
- 'v*.*.*' - 'v*.*.*'
branches:
- master
workflow_dispatch: workflow_dispatch:
env: env:
cache_nonce: 0 # Allows for easily busting actions/cache caches cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned nim_version: pinned
rust_version: 1.79.0 storage_binary_base: storage
codex_binary_base: codex
cirdl_binary_base: cirdl
build_dir: build build_dir: build
nim_flags: '' nim_flags: ''
windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll' windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'
@ -28,11 +28,10 @@ jobs:
uses: fabiocaccamo/create-matrix-action@v5 uses: fabiocaccamo/create-matrix-action@v5
with: with:
matrix: | matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
# Build # Build
build: build:
@ -62,7 +61,6 @@ jobs:
cpu: ${{ matrix.cpu }} cpu: ${{ matrix.cpu }}
shell: ${{ matrix.shell }} shell: ${{ matrix.shell }}
nim_version: ${{ matrix.nim_version }} nim_version: ${{ matrix.nim_version }}
rust_version: ${{ matrix.rust_version }}
- name: Release - Compute binary name - name: Release - Compute binary name
run: | run: |
@ -72,19 +70,15 @@ jobs:
windows*) os_name="windows" ;; windows*) os_name="windows" ;;
esac esac
github_ref_name="${GITHUB_REF_NAME/\//-}" github_ref_name="${GITHUB_REF_NAME/\//-}"
codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}" storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
if [[ ${os_name} == "windows" ]]; then if [[ ${os_name} == "windows" ]]; then
codex_binary="${codex_binary}.exe" storage_binary="${storage_binary}.exe"
cirdl_binary="${cirdl_binary}.exe"
fi fi
echo "codex_binary=${codex_binary}" >>$GITHUB_ENV echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
- name: Release - Build - name: Release - Build
run: | run: |
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}" make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
- name: Release - Libraries - name: Release - Libraries
run: | run: |
@ -94,21 +88,14 @@ jobs:
done done
fi fi
- name: Release - Upload codex build artifacts - name: Release - Upload Logos Storage build artifacts
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: release-${{ env.codex_binary }} name: release-${{ env.storage_binary }}
path: ${{ env.build_dir }}/${{ env.codex_binary_base }}* path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
retention-days: 30 retention-days: 30
- name: Release - Upload cirdl build artifacts - name: Release - Upload Windows libs
uses: actions/upload-artifact@v4
with:
name: release-${{ env.cirdl_binary }}
path: ${{ env.build_dir }}/${{ env.cirdl_binary_base }}*
retention-days: 30
- name: Release - Upload windows libs
if: matrix.os == 'windows' if: matrix.os == 'windows'
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
@ -138,7 +125,7 @@ jobs:
} }
# Compress and prepare # Compress and prepare
for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do for file in ${{ env.storage_binary_base }}*; do
if [[ "${file}" == *".exe"* ]]; then if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only # Windows - binary only
@ -170,6 +157,34 @@ jobs:
path: /tmp/release/ path: /tmp/release/
retention-days: 30 retention-days: 30
- name: Release - Upload to the cloud
env:
s3_endpoint: ${{ secrets.S3_ENDPOINT }}
s3_bucket: ${{ secrets.S3_BUCKET }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
run: |
# Variables
branch="${GITHUB_REF_NAME/\//-}"
folder="/tmp/release"
# Tagged releases
if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
echo "${branch}" > "${folder}"/latest
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
rm -f "${folder}"/latest
# master branch
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
# Custom branch
else
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
fi
- name: Release - name: Release
uses: softprops/action-gh-release@v2 uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
@ -183,6 +198,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
with: with:
token: ${{ secrets.DISPATCH_PAT }} token: ${{ secrets.DISPATCH_PAT }}
repository: codex-storage/py-codex-api-client repository: logos-storage/logos-storage-py-api-client
event-type: generate event-type: generate
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}' client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'

3
.gitignore vendored
View File

@ -47,3 +47,6 @@ nim.cfg
tests/integration/logs tests/integration/logs
data/ data/
examples/c/data-dir
examples/c/downloaded_hello.txt

48
.gitmodules vendored
View File

@ -37,22 +37,17 @@
path = vendor/nim-nitro path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git url = https://github.com/status-im/nim-nitro.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/questionable"] [submodule "vendor/questionable"]
path = vendor/questionable path = vendor/questionable
url = https://github.com/status-im/questionable.git url = https://github.com/status-im/questionable.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/upraises"]
path = vendor/upraises
url = https://github.com/markspanbroek/upraises.git
ignore = untracked
branch = master
[submodule "vendor/asynctest"] [submodule "vendor/asynctest"]
path = vendor/asynctest path = vendor/asynctest
url = https://github.com/status-im/asynctest.git url = https://github.com/status-im/asynctest.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/nim-presto"] [submodule "vendor/nim-presto"]
path = vendor/nim-presto path = vendor/nim-presto
url = https://github.com/status-im/nim-presto.git url = https://github.com/status-im/nim-presto.git
@ -132,7 +127,7 @@
path = vendor/nim-websock path = vendor/nim-websock
url = https://github.com/status-im/nim-websock.git url = https://github.com/status-im/nim-websock.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/nim-contract-abi"] [submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi url = https://github.com/status-im/nim-contract-abi
@ -160,13 +155,10 @@
path = vendor/nim-taskpools path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git url = https://github.com/status-im/nim-taskpools.git
ignore = untracked ignore = untracked
branch = master branch = stable
[submodule "vendor/nim-leopard"] [submodule "vendor/logos-storage-nim-dht"]
path = vendor/nim-leopard path = vendor/logos-storage-nim-dht
url = https://github.com/status-im/nim-leopard.git url = https://github.com/logos-storage/logos-storage-nim-dht.git
[submodule "vendor/nim-codex-dht"]
path = vendor/nim-codex-dht
url = https://github.com/codex-storage/nim-codex-dht.git
ignore = untracked ignore = untracked
branch = master branch = master
[submodule "vendor/nim-datastore"] [submodule "vendor/nim-datastore"]
@ -178,9 +170,6 @@
[submodule "vendor/nim-eth"] [submodule "vendor/nim-eth"]
path = vendor/nim-eth path = vendor/nim-eth
url = https://github.com/status-im/nim-eth url = https://github.com/status-im/nim-eth
[submodule "vendor/codex-contracts-eth"]
path = vendor/codex-contracts-eth
url = https://github.com/status-im/codex-contracts-eth
[submodule "vendor/nim-protobuf-serialization"] [submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization url = https://github.com/status-im/nim-protobuf-serialization
@ -193,28 +182,15 @@
[submodule "vendor/npeg"] [submodule "vendor/npeg"]
path = vendor/npeg path = vendor/npeg
url = https://github.com/zevv/npeg url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"]
path = vendor/nim-poseidon2
url = https://github.com/codex-storage/nim-poseidon2.git
[submodule "vendor/constantine"] [submodule "vendor/constantine"]
path = vendor/constantine path = vendor/constantine
url = https://github.com/mratsim/constantine.git url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"]
path = vendor/nim-circom-compat
url = https://github.com/codex-storage/nim-circom-compat.git
ignore = untracked
branch = master
[submodule "vendor/codex-storage-proofs-circuits"]
path = vendor/codex-storage-proofs-circuits
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
ignore = untracked
branch = master
[submodule "vendor/nim-serde"] [submodule "vendor/nim-serde"]
path = vendor/nim-serde path = vendor/nim-serde
url = https://github.com/codex-storage/nim-serde.git url = https://github.com/logos-storage/nim-serde.git
[submodule "vendor/nim-leveldbstatic"] [submodule "vendor/nim-leveldbstatic"]
path = vendor/nim-leveldbstatic path = vendor/nim-leveldbstatic
url = https://github.com/codex-storage/nim-leveldb.git url = https://github.com/logos-storage/nim-leveldb.git
[submodule "vendor/nim-zippy"] [submodule "vendor/nim-zippy"]
path = vendor/nim-zippy path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git url = https://github.com/status-im/nim-zippy.git
@ -225,9 +201,9 @@
path = vendor/nim-quic path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git url = https://github.com/vacp2p/nim-quic.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/nim-ngtcp2"] [submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2 path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked ignore = untracked
branch = master branch = main

View File

@ -93,15 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.' # default target, because it's the first one that doesn't start with '.'
# Builds the codex binary # Builds the Logos Storage binary
all: | build deps all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim toolsCirdl $(NIM_PARAMS) build.nims
# must be included after the default target # must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
@ -135,31 +130,16 @@ test: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims
# Builds and runs the smart contract tests
testContracts: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs the integration tests # Builds and runs the integration tests
testIntegration: | build deps testIntegration: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims $(ENV_SCRIPT) nim testIntegration $(TEST_PARAMS) $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs all tests (except for Taiko L2 tests) # Builds and runs all tests (except for Taiko L2 tests)
testAll: | build deps testAll: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims
# Builds and runs Taiko L2 tests
testTaiko: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) build.nims
# Builds and runs tool tests
testTools: | cirdl
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTools $(NIM_PARAMS) build.nims
# nim-libbacktrace # nim-libbacktrace
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
libbacktrace: libbacktrace:
@ -232,6 +212,7 @@ format:
$(NPH) *.nim $(NPH) *.nim
$(NPH) codex/ $(NPH) codex/
$(NPH) tests/ $(NPH) tests/
$(NPH) library/
clean-nph: clean-nph:
rm -f $(NPH) rm -f $(NPH)
@ -242,4 +223,32 @@ print-nph-path:
clean: | clean-nph clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) codex.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) codex.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) codex.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) codex.nims
endif
endif # "variables.mk" was not included endif # "variables.mk" was not included

View File

@ -1,22 +1,22 @@
# Codex Decentralized Durability Engine # Logos Storage Decentralized Engine
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval. > The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha. > WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability) [![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster) [![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster) [![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex) [![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ) [![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex) ![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run ## Build and Run
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build). For detailed instructions on preparing to build logos-storagenim see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
To build the project, clone it and run: To build the project, clone it and run:
@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
Run the client with: Run the client with:
```bash ```bash
build/codex build/storage
``` ```
## Configuration ## Configuration
It is possible to configure a Codex node in several ways: It is possible to configure a Logos Storage node in several ways:
1. CLI options 1. CLI options
2. Environment variables 2. Environment variables
3. Configuration file 3. Configuration file
@ -45,21 +45,71 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
## Guides ## Guides
To get acquainted with Codex, consider: To get acquainted with Logos Storage, consider:
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and; * running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well. * if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API ## API
The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage). The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage).
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The bindings is located in the `library` folder.
Currently, only a Go binding is included.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
### Run the Go example
Build the Go example:
```bash
go build -o storage-go examples/golang/storage.go
```
Export the library path:
```bash
export LD_LIBRARY_PATH=build
```
Run the example:
```bash
./storage-go
```
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
### Limitation
Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed.
## Contributing and development ## Contributing and development
Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
### Linting and formatting ### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. `logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up fresh setup, in order to get `nph` run `make build-nph`. If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
In order to format files run `make nph/<file/folder you want to format>`. In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them. If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.

View File

@ -1,2 +0,0 @@
ceremony
circuit_bench_*

View File

@ -1,33 +0,0 @@
## Benchmark Runner
Modify `runAllBenchmarks` proc in `run_benchmarks.nim` to the desired parameters and variations.
Then run it:
```sh
nim c -r run_benchmarks
```
By default all circuit files for each combinations of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suite your needs. See `create_circuits.nim` for their definition.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI
Runs Codex's prover setup with Ark / Circom.
Compile:
```sh
nim c codex_ark_prover_cli.nim
```
Run to see usage:
```sh
./codex_ark_prover_cli.nim -h
```

View File

@ -1,15 +0,0 @@
--path:
".."
--path:
"../tests"
--threads:
on
--tlsEmulation:
off
--d:
release
# when not defined(chronicles_log_level):
# --define:"chronicles_log_level:NONE" # compile all log statements
# --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime
# --"import":"logging" # ensure that logging is ignored at runtime

View File

@ -1,187 +0,0 @@
import std/[hashes, json, strutils, strformat, os, osproc, uri]
import ./utils
type
CircuitEnv* = object
nimCircuitCli*: string
circuitDirIncludes*: string
ptauPath*: string
ptauUrl*: Uri
codexProjDir*: string
CircuitArgs* = object
depth*: int
maxslots*: int
cellsize*: int
blocksize*: int
nsamples*: int
entropy*: int
seed*: int
nslots*: int
ncells*: int
index*: int
proc findCodexProjectDir(): string =
## find codex proj dir -- assumes this script is in codex/benchmarks
result = currentSourcePath().parentDir.parentDir
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir()
result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli"
result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
result.codexProjDir = codexDir
proc check*(env: var CircuitEnv) =
## check that the CWD of script is in the codex parent
let codexProjDir = findCodexProjectDir()
echo "\n\nFound project dir: ", codexProjDir
let snarkjs = findExe("snarkjs")
if snarkjs == "":
echo dedent"""
ERROR: must install snarkjs first
npm install -g snarkjs@latest
"""
let circom = findExe("circom")
if circom == "":
echo dedent"""
ERROR: must install circom first
git clone https://github.com/iden3/circom.git
cargo install --path circom
"""
if snarkjs == "" or circom == "":
quit 2
echo "Found SnarkJS: ", snarkjs
echo "Found Circom: ", circom
if not env.nimCircuitCli.fileExists:
echo "Nim Circuit reference cli not found: ", env.nimCircuitCli
echo "Building Circuit reference cli...\n"
withDir env.nimCircuitCli.parentDir:
runit "nimble build -d:release --styleCheck:off cli"
echo "CWD: ", getCurrentDir()
assert env.nimCircuitCli.fileExists()
echo "Found NimCircuitCli: ", env.nimCircuitCli
echo "Found Circuit Path: ", env.circuitDirIncludes
echo "Found PTAU file: ", env.ptauPath
proc downloadPtau*(ptauPath: string, ptauUrl: Uri) =
## download ptau file using curl if needed
if not ptauPath.fileExists:
echo "Ceremony file not found, downloading..."
createDir ptauPath.parentDir
withDir ptauPath.parentDir:
runit fmt"curl --output '{ptauPath}' '{$ptauUrl}/{ptauPath.splitPath().tail}'"
else:
echo "Found PTAU file at: ", ptauPath
proc getCircuitBenchStr*(args: CircuitArgs): string =
for f, v in fieldPairs(args):
result &= "_" & f & $v
proc getCircuitBenchPath*(args: CircuitArgs, env: CircuitEnv): string =
## generate folder name for unique circuit args
result = env.codexProjDir / "benchmarks/circuit_bench" & getCircuitBenchStr(args)
proc generateCircomAndSamples*(args: CircuitArgs, env: CircuitEnv, name: string) =
## run nim circuit and sample generator
var cliCmd = env.nimCircuitCli
for f, v in fieldPairs(args):
cliCmd &= " --" & f & "=" & $v
if not "input.json".fileExists:
echo "Generating Circom Files..."
runit fmt"{cliCmd} -v --circom={name}.circom --output=input.json"
proc createCircuit*(
    args: CircuitArgs,
    env: CircuitEnv,
    name = "proof_main",
    circBenchDir = getCircuitBenchPath(args, env),
    someEntropy = "some_entropy_75289v3b7rcawcsyiur",
    doGenerateWitness = false,
): tuple[dir: string, name: string] =
  ## Generates all the files needed for to run a proof circuit. Downloads the PTAU file if needed.
  ##
  ## All needed circuit files will be generated as needed.
  ## They will be located in `circBenchDir` which defaults to a folder like:
  ## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
  ## with all the given CircuitArgs.
  ##
  ## Every step below is skipped when its output file already exists, so the
  ## proc is safe to re-run and acts as a crude build cache.
  ## Returns the bench directory and circuit name for downstream callers.
  let circdir = circBenchDir
  # Fetch the powers-of-tau ceremony file if it is not present locally.
  downloadPtau env.ptauPath, env.ptauUrl
  echo "Creating circuit dir: ", circdir
  createDir circdir
  withDir circdir:
    # Persist the exact arguments so a bench dir is self-describing.
    writeFile("circuit_params.json", pretty(%*args))
    let
      # NOTE(review): `inputs` is declared but never used below.
      inputs = circdir / "input.json"
      zkey = circdir / fmt"{name}.zkey"
      wasm = circdir / fmt"{name}.wasm"
      r1cs = circdir / fmt"{name}.r1cs"
      wtns = circdir / fmt"{name}.wtns"
    # Produce {name}.circom and input.json (no-op if input.json exists).
    generateCircomAndSamples(args, env, name)
    if not wasm.fileExists or not r1cs.fileExists:
      runit fmt"circom --r1cs --wasm --O2 -l{env.circuitDirIncludes} {name}.circom"
      # circom emits the wasm into a `{name}_js/` subfolder; hoist it up.
      moveFile fmt"{name}_js" / fmt"{name}.wasm", fmt"{name}.wasm"
    echo "Found wasm: ", wasm
    echo "Found r1cs: ", r1cs
    if not zkey.fileExists:
      echo "ZKey not found, generating..."
      # snarkjs is node-based and needs a large heap for groth16 setup.
      putEnv "NODE_OPTIONS", "--max-old-space-size=8192"
      if not fmt"{name}_0000.zkey".fileExists:
        runit fmt"snarkjs groth16 setup {r1cs} {env.ptauPath} {name}_0000.zkey"
        echo fmt"Generated {name}_0000.zkey"
      let cmd =
        fmt"snarkjs zkey contribute {name}_0000.zkey {name}_0001.zkey --name='1st Contributor Name'"
      echo "CMD: ", cmd
      # The contribute step reads its entropy interactively; feed it via stdin.
      let cmdRes = execCmdEx(cmd, options = {}, input = someEntropy & "\n")
      assert cmdRes.exitCode == 0
      # Keep only the final contributed key under the canonical name.
      moveFile fmt"{name}_0001.zkey", fmt"{name}.zkey"
      removeFile fmt"{name}_0000.zkey"
    if not wtns.fileExists and doGenerateWitness:
      # NOTE(review): the `../input.json` / `../witness.wtns` relative paths
      # point outside the bench dir while {wtns} is inside it — looks
      # inconsistent; confirm against generate_witness.js expectations.
      runit fmt"node generate_witness.js {wtns} ../input.json ../witness.wtns"
  return (circdir, name)
when isMainModule:
  # Smoke test: build one circuit with the default environment and a
  # fixed, known-good parameter set.
  echo "findCodexProjectDir: ", findCodexProjectDir()
  var circuitEnv = CircuitEnv.default()
  circuitEnv.check()
  let circuitArgs = CircuitArgs(
    depth: 32, # maximum depth of the slot tree
    maxslots: 256, # maximum number of slots
    cellsize: 2048, # cell size in bytes
    blocksize: 65536, # block size in bytes
    nsamples: 5, # number of samples to prove
    entropy: 1234567, # external randomness
    seed: 12345, # seed for creating fake data
    nslots: 11, # number of slots in the dataset
    index: 3, # which slot we prove (0..NSLOTS-1)
    ncells: 512, # number of cells in this slot
  )
  let benchenv = createCircuit(circuitArgs, circuitEnv)
  echo "\nBench dir:\n", benchenv

View File

@ -1,105 +0,0 @@
import std/[sequtils, strformat, os, options, importutils]
import std/[times, os, strutils, terminal]
import pkg/questionable
import pkg/questionable/results
import pkg/datastore
import pkg/codex/[rng, stores, merkletree, codextypes, slots]
import pkg/codex/utils/[json, poseidon2digest]
import pkg/codex/slots/[builder, sampler/utils, backends/helpers]
import pkg/constantine/math/[arithmetic, io/io_bigints, io/io_fields]
import ./utils
import ./create_circuits
type CircuitFiles* = object
  ## Paths to the on-disk artifacts needed to run a proof for one circuit.
  r1cs*: string # compiled R1CS constraint system
  wasm*: string # witness-generator wasm module
  zkey*: string # proving/verification key
  inputs*: string # JSON file with the proof inputs
proc runArkCircom(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
  ## Load the proof inputs and circuit artifacts from `files`, then benchmark
  ## proving and verification through the CircomCompat (ark-circom) backend,
  ## running each phase `benchmarkLoops` times and recording results via
  ## the `benchmark` template.
  echo "Loading sample proof..."
  var
    inputData = files.inputs.readFile()
    # `!` unwraps the questionable result; raises if the JSON is invalid.
    inputJson = !JsonNode.parse(inputData)
    proofInputs = Poseidon2Hash.jsonToProofInput(inputJson)
    circom = CircomCompat.init(
      files.r1cs,
      files.wasm,
      files.zkey,
      slotDepth = args.depth,
      numSamples = args.nsamples,
    )
  defer:
    circom.release() # this comes from the rust FFI
  echo "Sample proof loaded..."
  echo "Proving..."
  let nameArgs = getCircuitBenchStr(args)
  var proof: CircomProof
  benchmark fmt"prover-{nameArgs}", benchmarkLoops:
    proof = circom.prove(proofInputs).tryGet
  var verRes: bool
  benchmark fmt"verify-{nameArgs}", benchmarkLoops:
    # Only the last iteration's proof is verified; each loop re-verifies it.
    verRes = circom.verify(proof, proofInputs).tryGet
  echo "verify result: ", verRes
proc runRapidSnark(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
  ## Benchmark proving via the rapidsnark native prover.
  ## Currently a stub — only announces itself; the actual invocation
  ## (see the command sketch below) is not implemented yet.
  # time rapidsnark ${CIRCUIT_MAIN}.zkey witness.wtns proof.json public.json
  echo "generating the witness..."
  ## TODO
proc runBenchmark(args: CircuitArgs, env: CircuitEnv, benchmarkLoops: int) =
  ## Execute benchmarks for a given set of args.
  ## Creates (or reuses) a folder like `benchmarks/circuit_bench_$(args)`
  ## holding all circuit artifacts, then runs the ark-circom prover/verifier
  ## benchmark `benchmarkLoops` times.
  # Fix: the previous version rebound this as `let env = ...`, shadowing the
  # `env: CircuitEnv` parameter with an unrelated tuple — confusing and it
  # made the original parameter unreachable below. Use a distinct name.
  let bench = createCircuit(args, env)
  ## TODO: copy over testcircomcompat proving
  let files = CircuitFiles(
    r1cs: bench.dir / fmt"{bench.name}.r1cs",
    wasm: bench.dir / fmt"{bench.name}.wasm",
    zkey: bench.dir / fmt"{bench.name}.zkey",
    inputs: bench.dir / fmt"input.json",
  )
  runArkCircom(args, files, benchmarkLoops)
proc runAllBenchmarks*() =
  ## Entry point: benchmark the prover for nsamples = 1..3, five proving
  ## loops per configuration, then print the collected summaries.
  echo "Running benchmark"
  # setup()
  var env = CircuitEnv.default()
  env.check()
  var args = CircuitArgs(
    depth: 32, # maximum depth of the slot tree
    maxslots: 256, # maximum number of slots
    cellsize: 2048, # cell size in bytes
    blocksize: 65536, # block size in bytes
    nsamples: 1, # number of samples to prove
    entropy: 1234567, # external randomness
    seed: 12345, # seed for creating fake data
    nslots: 11, # number of slots in the dataset
    index: 3, # which slot we prove (0..NSLOTS-1)
    ncells: 512, # number of cells in this slot
  )
  const
    numberSamples = 3
    benchmarkLoops = 5
  for sampleCount in 1 .. numberSamples:
    args.nsamples = sampleCount
    stdout.styledWriteLine(fgYellow, "\nbenchmarking args: ", $args)
    runBenchmark(args, env, benchmarkLoops)
  printBenchMarkSummaries()
when isMainModule:
  # Run the full benchmark sweep when this module is executed directly.
  runAllBenchmarks()

View File

@ -1,76 +0,0 @@
import std/tables
template withDir*(dir: string, blk: untyped) =
  ## Evaluate `blk` with the current working directory switched to `dir`,
  ## restoring the original working directory afterwards — even when `blk`
  ## raises.
  let startDir = getCurrentDir()
  try:
    setCurrentDir(dir)
    `blk`
  finally:
    setCurrentDir(startDir)
template runit*(cmd: string) =
  ## Run a shell command, echoing it and its exit status, and fail hard if
  ## it exits non-zero.
  echo "RUNNING: ", cmd
  let cmdRes = execShellCmd(cmd)
  echo "STATUS: ", cmdRes
  # Fix: use doAssert instead of assert so the check survives release builds
  # compiled with -d:danger — silently continuing after a failed build step
  # (circom/snarkjs/node) would corrupt every later stage. Also include the
  # failing command in the message for easier debugging.
  doAssert cmdRes == 0, "command failed with status " & $cmdRes & ": " & cmd
var benchRuns* = newTable[string, tuple[avgTimeSec: float, count: int]]()
func avg(vals: openArray[float]): float =
  ## Arithmetic mean of `vals`; returns 0.0 for an empty input.
  if vals.len == 0:
    return 0.0
  # Sum once and divide at the end: the previous version divided every
  # element by the length inside the loop, which costs one division per
  # element and accumulates more floating-point rounding error.
  var total = 0.0
  for v in vals:
    total += v
  total / vals.len.toFloat()
template benchmark*(name: untyped, count: int, blk: untyped) =
  ## simple benchmarking of a block of code
  ## Runs `blk` `count` times, prints each run's wall-clock time, and
  ## records the average (seconds) under `name` in the global `benchRuns`.
  let benchmarkName: string = name
  var runs = newSeqOfCap[float](count)
  for i in 1 .. count:
    block:
      # NOTE(review): epochTime() measures wall-clock time even though the
      # printed label says "CPU Time" — confirm which is intended.
      let t0 = epochTime()
      `blk`
      let elapsed = epochTime() - t0
      runs.add elapsed
  # Render all per-run timings as a comma-separated list with 3 decimals.
  var elapsedStr = ""
  for v in runs:
    elapsedStr &= ", " & v.formatFloat(format = ffDecimal, precision = 3)
  stdout.styledWriteLine(
    fgGreen, "CPU Time [", benchmarkName, "] ", "avg(", $count, "): ", elapsedStr, " s"
  )
  benchRuns[benchmarkName] = (runs.avg(), count)
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
  ## Print every recorded benchmark from `benchRuns`, as human-readable
  ## lines (`printRegular`) and/or as a tab-separated table (`printTsv`).
  if printRegular:
    echo ""
    for k, v in benchRuns:
      echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
  if printTsv:
    echo ""
    # TSV header followed by one row per benchmark — easy to paste into a sheet.
    echo "name", "\t", "avgTimeSec", "\t", "count"
    for k, v in benchRuns:
      echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math
func floorLog2*(x: int): int =
  ## Floor of log2(`x`); returns -1 when `x <= 0`.
  result = -1
  var n = x
  while n > 0:
    inc result
    n = n shr 1

func ceilingLog2*(x: int): int =
  ## Ceiling of log2(`x`); returns -1 when `x == 0`.
  if x == 0:
    -1
  else:
    floorLog2(x - 1) + 1

func checkPowerOfTwo*(x: int, what: string): int =
  ## Pass `x` through unchanged, asserting that it is a power of two.
  ## `what` names the value in the assertion message.
  let bits = ceilingLog2(x)
  assert(x == 2 ^ bits, ("`" & what & "` is expected to be a power of 2"))
  x

View File

@ -1,9 +1,20 @@
mode = ScriptMode.Verbose mode = ScriptMode.Verbose
import std/os except commandLineParams import std/os except commandLineParams
import std/strutils
### Helper functions ### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = proc truthy(val: string): bool =
const truthySwitches = @["yes", "1", "on", "true"]
return val in truthySwitches
proc buildBinary(
srcName: string,
outName = os.lastPathPart(srcName),
srcDir = "./",
params = "",
lang = "c",
) =
if not dirExists "build": if not dirExists "build":
mkDir "build" mkDir "build"
@ -18,58 +29,67 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
let let
# Place build output in 'build' folder, even if name includes a longer path. # Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd = cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim" srcName & ".nim"
exec(cmd) exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") = proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
buildBinary name, srcDir, params if not dirExists "build":
exec "build/" & name mkDir "build"
task codex, "build codex binary": if `type` == "dynamic":
let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:chronicles_runtime_filtering " & "-d:chronicles_log_level=TRACE " & params &
" " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:chronicles_runtime_filtering " & "-d:chronicles_log_level=TRACE " & params &
" " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task storage, "build logos storage binary":
buildBinary "codex", buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE" params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary": task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl" buildBinary "tools/cirdl/cirdl"
task testCodex, "Build & run Codex tests": task testStorage, "Build & run Logos Storage tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true" test "testCodex", outName = "testStorage"
task testContracts, "Build & run Codex Contract tests":
test "testContracts"
task testIntegration, "Run integration tests": task testIntegration, "Run integration tests":
buildBinary "codex", buildBinary "codex",
params = outName = "storage",
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true" params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
test "testIntegration" test "testIntegration"
# use params to enable logging from the integration test executable # use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " & # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE" # "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary": task build, "build Logos Storage binary":
codexTask() storageTask()
task test, "Run tests": task test, "Run tests":
testCodexTask() testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)": task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask() testStorageTask()
testContractsTask()
testIntegrationTask() testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
codexTask()
test "testTaiko"
import strutils import strutils
import os import os
@ -99,9 +119,7 @@ task coverage, "generates code coverage report":
echo "======== Running Tests ======== " echo "======== Running Tests ======== "
test "coverage", test "coverage",
srcDir = "tests/", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release"
params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c") exec("rm nimcache/coverage/*.c")
rmDir("coverage") rmDir("coverage")
mkDir("coverage") mkDir("coverage")
@ -114,10 +132,32 @@ task coverage, "generates code coverage report":
nimSrcs nimSrcs
) )
echo " ======== Generating HTML coverage report ======== " echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ") exec(
"genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report "
)
echo " ======== Coverage report Done ======== " echo " ======== Coverage report Done ======== "
task showCoverage, "open coverage html": task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== " echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "": if findExe("open") != "":
exec("open coverage/report/index.html") exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

51
ci/linux.Jenkinsfile Normal file
View File

@ -0,0 +1,51 @@
#!/usr/bin/env groovy
// Linux CI pipeline: build the Nix flake inside a build container and
// smoke-test the produced `storage` binary.
library 'status-jenkins-lib@v1.9.37'
pipeline {
  agent {
    docker {
      label 'linuxcontainer'
      image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
      // Mount the host Nix store and config so builds reuse the shared cache.
      args '--volume=/nix:/nix ' +
           '--volume=/etc/nix:/etc/nix '
    }
  }
  options {
    timestamps()
    ansiColor('xterm')
    timeout(time: 20, unit: 'MINUTES')
    disableConcurrentBuilds()
    disableRestartFromStage()
    /* manage how many builds we keep */
    buildDiscarder(logRotator(
      numToKeepStr: '20',
      daysToKeepStr: '30',
    ))
  }
  stages {
    stage('Build') {
      steps {
        script {
          // Build the default flake output; result symlink lands in ./result.
          nix.flake("default")
        }
      }
    }
    stage('Check') {
      steps {
        script {
          // Minimal sanity check: the binary runs and reports its version.
          sh './result/bin/storage --version'
        }
      }
    }
  }
  post {
    cleanup {
      // Clean both the workspace and the per-build temp dir.
      cleanWs()
      dir(env.WORKSPACE_TMP) { deleteDir() }
    }
  }
}

View File

@ -1,11 +1,15 @@
#!/usr/bin/env groovy #!/usr/bin/env groovy
library 'status-jenkins-lib@v1.9.13' library 'status-jenkins-lib@v1.9.37'
pipeline { pipeline {
agent { label 'linux && x86_64 && nix-2.24' } agent { label 'macos && aarch64 && nix' }
options { options {
timestamps()
ansiColor('xterm')
timeout(time: 20, unit: 'MINUTES')
disableConcurrentBuilds() disableConcurrentBuilds()
disableRestartFromStage()
/* manage how many builds we keep */ /* manage how many builds we keep */
buildDiscarder(logRotator( buildDiscarder(logRotator(
numToKeepStr: '20', numToKeepStr: '20',
@ -25,13 +29,16 @@ pipeline {
stage('Check') { stage('Check') {
steps { steps {
script { script {
sh './result/bin/codex --version' sh './result/bin/storage --version'
} }
} }
} }
} }
post { post {
cleanup { cleanWs() } cleanup {
cleanWs()
dir(env.WORKSPACE_TMP) { deleteDir() }
}
} }
} }

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -45,7 +45,7 @@ when isMainModule:
let config = CodexConf.load( let config = CodexConf.load(
version = codexFullVersion, version = codexFullVersion,
envVarsPrefix = "codex", envVarsPrefix = "storage",
secondarySources = proc( secondarySources = proc(
config: CodexConf, sources: auto config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} = ) {.gcsafe, raises: [ConfigurationError].} =
@ -54,6 +54,16 @@ when isMainModule:
, ,
) )
config.setupLogging() config.setupLogging()
try:
updateLogLevel(config.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
config.setupMetrics() config.setupMetrics()
if not (checkAndCreateDataDir((config.dataDir).string)): if not (checkAndCreateDataDir((config.dataDir).string)):
@ -61,9 +71,6 @@ when isMainModule:
# permissions are insecure. # permissions are insecure.
quit QuitFailure quit QuitFailure
if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)):
quit QuitFailure
trace "Data dir initialized", dir = $config.dataDir trace "Data dir initialized", dir = $config.dataDir
if not (checkAndCreateDataDir((config.dataDir / "repo"))): if not (checkAndCreateDataDir((config.dataDir / "repo"))):
@ -89,15 +96,15 @@ when isMainModule:
try: try:
CodexServer.new(config, privateKey) CodexServer.new(config, privateKey)
except Exception as exc: except Exception as exc:
error "Failed to start Codex", msg = exc.msg error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure quit QuitFailure
## Ctrl+C handling ## Ctrl+C handling
proc doShutdown() = proc doShutdown() =
shutdown = server.stop() shutdown = server.shutdown()
state = CodexStatus.Stopping state = CodexStatus.Stopping
notice "Stopping Codex" notice "Stopping Logos Storage"
proc controlCHandler() {.noconv.} = proc controlCHandler() {.noconv.} =
when defined(windows): when defined(windows):
@ -128,7 +135,7 @@ when isMainModule:
try: try:
waitFor server.start() waitFor server.start()
except CatchableError as error: except CatchableError as error:
error "Codex failed to start", error = error.msg error "Logos Storage failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey, # XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all # but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they # services so they won't crash if we attempt to stop them before they
@ -149,7 +156,7 @@ when isMainModule:
# be assigned before state switches to Stopping # be assigned before state switches to Stopping
waitFor shutdown waitFor shutdown
except CatchableError as error: except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg error "Logos Storage didn't shutdown correctly", error = error.msg
quit QuitFailure quit QuitFailure
notice "Exited codex" notice "Exited Storage"

View File

@ -1,9 +1,9 @@
version = "0.1.0" version = "0.1.0"
author = "Codex Team" author = "Logos Storage Team"
description = "p2p data durability engine" description = "p2p data durability engine"
license = "MIT" license = "MIT"
binDir = "build" binDir = "build"
srcDir = "." srcDir = "."
installFiles = @["build.nims"] installFiles = @["build.nims"]
include "build.nims" include "build.nims"

View File

@ -1,6 +1,5 @@
import ./engine/discovery import ./engine/discovery
import ./engine/advertiser import ./engine/advertiser
import ./engine/engine import ./engine/engine
import ./engine/payments
export discovery, advertiser, engine, payments export discovery, advertiser, engine

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -124,6 +124,10 @@ proc start*(b: Advertiser) {.async: (raises: []).} =
trace "Advertiser start" trace "Advertiser start"
# The advertiser is expected to be started only once.
if b.advertiserRunning:
raiseAssert "Advertiser can only be started once — this should not happen"
proc onBlock(cid: Cid) {.async: (raises: []).} = proc onBlock(cid: Cid) {.async: (raises: []).} =
try: try:
await b.advertiseBlock(cid) await b.advertiseBlock(cid)
@ -133,10 +137,6 @@ proc start*(b: Advertiser) {.async: (raises: []).} =
doAssert(b.localStore.onBlockStored.isNone()) doAssert(b.localStore.onBlockStored.isNone())
b.localStore.onBlockStored = onBlock.some b.localStore.onBlockStored = onBlock.some
if b.advertiserRunning:
warn "Starting advertiser twice"
return
b.advertiserRunning = true b.advertiserRunning = true
for i in 0 ..< b.concurrentAdvReqs: for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop() let fut = b.processQueueLoop()

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -8,6 +8,7 @@
## those terms. ## those terms.
import std/sequtils import std/sequtils
import std/algorithm
import pkg/chronos import pkg/chronos
import pkg/libp2p/cid import pkg/libp2p/cid
@ -38,6 +39,7 @@ const
DefaultConcurrentDiscRequests = 10 DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3 DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds DefaultDiscoveryLoopSleep = 3.seconds
type DiscoveryEngine* = ref object of RootObj type DiscoveryEngine* = ref object of RootObj
@ -51,11 +53,32 @@ type DiscoveryEngine* = ref object of RootObj
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests # Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} = proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try: try:
while b.discEngineRunning: while b.discEngineRunning:
@ -78,8 +101,16 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
trace "Discovery request already in progress", cid trace "Discovery request already in progress", cid
continue continue
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid) let haves = b.peers.peersHave(cid)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock: if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid) let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request b.inFlightDiscReqs[cid] = request
@ -156,6 +187,7 @@ proc new*(
concurrentDiscReqs = DefaultConcurrentDiscRequests, concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep, discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock, minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine = ): DiscoveryEngine =
## Create a discovery engine instance for advertising services ## Create a discovery engine instance for advertising services
## ##
@ -171,4 +203,5 @@ proc new*(
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](), inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep, discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock, minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
) )

File diff suppressed because it is too large Load Diff

View File

@ -1,46 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results
import ../peers
export nitro
export results
# Placeholder payment parameters until real chain/asset configuration is wired in.
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
func openLedgerChannel*(
    wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
  ## Open a ledger channel with `hub` for `asset`, funded with the default
  ## `AmountPerChannel`, on the placeholder `ChainId`.
  wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)
func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
  ## Return the peer's cached payment channel, or open a new ledger channel
  ## to the peer's account and cache it on the peer context.
  ## Fails when the peer has no account set.
  if channel =? peer.paymentChannel:
    success channel
  elif account =? peer.account:
    let channel = ?wallet.openLedgerChannel(account.address, Asset)
    peer.paymentChannel = channel.some # cache for subsequent payments
    success channel
  else:
    failure "no account set for peer"
func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
  ## Pay `amount` of `Asset` to `peer` over its payment channel, opening
  ## the channel first if needed. Fails when the peer has no account set.
  if account =? peer.account:
    let asset = Asset
    let receiver = account.address
    let channel = ?wallet.getOrOpenChannel(peer)
    wallet.pay(channel, asset, receiver, amount)
  else:
    failure "no account set for peer"

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -34,7 +34,7 @@ declareGauge(
const const
DefaultBlockRetries* = 3000 DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis DefaultRetryInterval* = 2.seconds
type type
RetriesExhaustedError* = object of CatchableError RetriesExhaustedError* = object of CatchableError
@ -42,7 +42,7 @@ type
BlockReq* = object BlockReq* = object
handle*: BlockHandle handle*: BlockHandle
inFlight*: bool requested*: ?PeerId
blockRetries*: int blockRetries*: int
startTime*: int64 startTime*: int64
@ -50,12 +50,13 @@ type
blockRetries*: int = DefaultBlockRetries blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests blocks*: Table[BlockAddress, BlockReq] # pending Block requests
lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) = proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*( proc getWantHandle*(
self: PendingBlocksManager, address: BlockAddress, inFlight = false self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block ## Add an event for a block
## ##
@ -65,11 +66,13 @@ proc getWantHandle*(
do: do:
let blk = BlockReq( let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"), handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight, requested: requested,
blockRetries: self.blockRetries, blockRetries: self.blockRetries,
startTime: getMonoTime().ticks, startTime: getMonoTime().ticks,
) )
self.blocks[address] = blk self.blocks[address] = blk
self.lastInclusion = Moment.now()
let handle = blk.handle let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} = proc cleanUpBlock(data: pointer) {.raises: [].} =
@ -86,9 +89,22 @@ proc getWantHandle*(
return handle return handle
proc getWantHandle*( proc getWantHandle*(
self: PendingBlocksManager, cid: Cid, inFlight = false self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight) self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
## Complete a pending want handle
self.blocks.withValue(address, blockReq):
if not blockReq[].handle.finished:
trace "Completing want handle from provided block", address
blockReq[].handle.complete(blk)
else:
trace "Want handle already completed", address
do:
trace "No pending want handle found for address", address
proc resolve*( proc resolve*(
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
@ -108,9 +124,6 @@ proc resolve*(
blockReq.handle.complete(bd.blk) blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs) codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else: else:
trace "Block handle already finished", address = bd.address trace "Block handle already finished", address = bd.address
@ -128,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
self.blocks.withValue(address, pending): self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0 result = pending[].blockRetries <= 0
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) = func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
## Set inflight status for a block ## Check if a block has been requested to a peer
##
result = false
self.blocks.withValue(address, pending):
result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
## Returns the peer that requested this block
##
result = PeerId.none
self.blocks.withValue(address, pending):
result = pending[].requested
proc markRequested*(
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
## Marks this block as having been requested to a peer
## ##
self.blocks.withValue(address, pending): if self.isRequested(address):
pending[].inFlight = inFlight return false
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##
self.blocks.withValue(address, pending): self.blocks.withValue(address, pending):
result = pending[].inFlight pending[].requested = peer.some
return true
proc clearRequest*(
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
self.blocks.withValue(address, pending):
if peer.isSome:
assert peer == pending[].requested
pending[].requested = PeerId.none
func contains*(self: PendingBlocksManager, cid: Cid): bool = func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks BlockAddress.init(cid) in self.blocks

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -20,12 +20,11 @@ import pkg/questionable/results
import ../../blocktype as bt import ../../blocktype as bt
import ../../logutils import ../../logutils
import ../protobuf/blockexc as pb import ../protobuf/blockexc as pb
import ../protobuf/payments
import ../../utils/trackedfutures import ../../utils/trackedfutures
import ./networkpeer import ./networkpeer
export networkpeer, payments export networkpeer
logScope: logScope:
topics = "codex blockexcnetwork" topics = "codex blockexcnetwork"
@ -35,22 +34,20 @@ const
DefaultMaxInflight* = 100 DefaultMaxInflight* = 100
type type
WantListHandler* = WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
BlocksDeliveryHandler* = BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).} proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* = BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).} proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).} PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
BlockExcHandlers* = object BlockExcHandlers* = object
onWantList*: WantListHandler onWantList*: WantListHandler
onBlocksDelivery*: BlocksDeliveryHandler onBlocksDelivery*: BlocksDeliveryHandler
onPresence*: BlockPresenceHandler onPresence*: BlockPresenceHandler
onAccount*: AccountHandler onPeerJoined*: PeerEventHandler
onPayment*: PaymentHandler onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc( WantListSender* = proc(
id: PeerId, id: PeerId,
@ -70,18 +67,12 @@ type
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {. PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError]) async: (raises: [CancelledError])
.} .}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
BlockExcRequest* = object BlockExcRequest* = object
sendWantList*: WantListSender sendWantList*: WantListSender
sendWantCancellations*: WantCancellationSender sendWantCancellations*: WantCancellationSender
sendBlocksDelivery*: BlocksDeliverySender sendBlocksDelivery*: BlocksDeliverySender
sendPresence*: PresenceSender sendPresence*: PresenceSender
sendAccount*: AccountSender
sendPayment*: PaymentSender
BlockExcNetwork* = ref object of LPProtocol BlockExcNetwork* = ref object of LPProtocol
peers*: Table[PeerId, NetworkPeer] peers*: Table[PeerId, NetworkPeer]
@ -205,131 +196,111 @@ proc sendBlockPresence*(
b.send(id, Message(blockPresences: @presence)) b.send(id, Message(blockPresences: @presence))
proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async: (raises: []).} =
## Handle account info
##
if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)
proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##
b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##
b.send(id, Message(payment: StateChannelUpdate.init(payment)))
proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async: (raises: []).} =
## Handle payment
##
if not network.handlers.onPayment.isNil:
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler( proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} = ) {.async: (raises: []).} =
## handle rpc messages ## handle rpc messages
## ##
if msg.wantList.entries.len > 0: if msg.wantList.entries.len > 0:
b.trackedFutures.track(b.handleWantList(peer, msg.wantList)) self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0: if msg.payload.len > 0:
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload)) self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0: if msg.blockPresences.len > 0:
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences)) self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account): proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
b.trackedFutures.track(b.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment):
b.trackedFutures.track(b.handlePayment(peer, payment))
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer ## Creates or retrieves a BlockExcNetwork Peer
## ##
if peer in b.peers: if peer in self.peers:
return b.peers.getOrDefault(peer, nil) return self.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {. var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError]) async: (raises: [CancelledError])
.} = .} =
try: try:
trace "Getting new connection stream", peer trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec) return await self.switch.dial(peer, Codec)
except CancelledError as error: except CancelledError as error:
raise error raise error
except CatchableError as exc: except CatchableError as exc:
trace "Unable to connect to blockexc peer", exc = exc.msg trace "Unable to connect to blockexc peer", exc = exc.msg
if not isNil(b.getConn): if not isNil(self.getConn):
getConn = b.getConn getConn = self.getConn
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} = let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg) await self.rpcHandler(p, msg)
# create new pubsub peer # create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler) let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
debug "Created new blockexc peer", peer debug "Created new blockexc peer", peer
b.peers[peer] = blockExcPeer self.peers[peer] = blockExcPeer
return blockExcPeer return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) = proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Perform initial setup, such as want
## list exchange
##
discard b.getOrCreatePeer(peer)
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer ## Dial a peer
## ##
if b.isSelf(peer.peerId): if self.isSelf(peer.peerId):
trace "Skipping dialing self", peer = peer.peerId trace "Skipping dialing self", peer = peer.peerId
return return
if peer.peerId in b.peers: if peer.peerId in self.peers:
trace "Already connected to peer", peer = peer.peerId trace "Already connected to peer", peer = peer.peerId
return return
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) = proc dropPeer*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
trace "Dropping peer", peer
try:
if not self.switch.isNil:
await self.switch.disconnect(peer)
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
discard self.getOrCreatePeer(peer)
if not self.handlers.onPeerJoined.isNil:
await self.handlers.onPeerJoined(peer)
proc handlePeerDeparted*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Cleanup disconnected peer ## Cleanup disconnected peer
## ##
trace "Dropping peer", peer trace "Cleaning up departed peer", peer
b.peers.del(peer) self.peers.del(peer)
if not self.handlers.onPeerDeparted.isNil:
await self.handlers.onPeerDeparted(peer)
method init*(self: BlockExcNetwork) = method init*(self: BlockExcNetwork) {.raises: [].} =
## Perform protocol initialization ## Perform protocol initialization
## ##
proc peerEventHandler( proc peerEventHandler(
peerId: PeerId, event: PeerEvent peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = ): Future[void] {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined: if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId) await self.handlePeerJoined(peerId)
elif event.kind == PeerEventKind.Left:
await self.handlePeerDeparted(peerId)
else: else:
self.dropPeer(peerId) warn "Unknown peer event", event
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
@ -391,23 +362,11 @@ proc new*(
): Future[void] {.async: (raw: true, raises: [CancelledError]).} = ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence) self.sendBlockPresence(id, presence)
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)
self.request = BlockExcRequest( self.request = BlockExcRequest(
sendWantList: sendWantList, sendWantList: sendWantList,
sendWantCancellations: sendWantCancellations, sendWantCancellations: sendWantCancellations,
sendBlocksDelivery: sendBlocksDelivery, sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence, sendPresence: sendPresence,
sendAccount: sendAccount,
sendPayment: sendPayment,
) )
self.init() self.init()

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -24,10 +24,9 @@ logScope:
const DefaultYieldInterval = 50.millis const DefaultYieldInterval = 50.millis
type type
ConnProvider* = ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
NetworkPeer* = ref object of RootObj NetworkPeer* = ref object of RootObj
id*: PeerId id*: PeerId
@ -65,7 +64,9 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
except CatchableError as err: except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg warn "Exception in blockexc read loop", msg = err.msg
finally: finally:
trace "Detaching read loop", peer = self.id, connId = conn.oid warn "Detaching read loop", peer = self.id, connId = conn.oid
if self.sendConn == conn:
self.sendConn = nil
await conn.close() await conn.close()
proc connect*( proc connect*(
@ -89,7 +90,12 @@ proc send*(
return return
trace "Sending message", peer = self.id, connId = conn.oid trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg)) try:
await conn.writeLp(protobufEncode(msg))
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
func new*( func new*(
T: type NetworkPeer, T: type NetworkPeer,

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -13,40 +13,83 @@ import std/sets
import pkg/libp2p import pkg/libp2p
import pkg/chronos import pkg/chronos
import pkg/nitro
import pkg/questionable import pkg/questionable
import ../protobuf/blockexc import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence import ../protobuf/presence
import ../../blocktype import ../../blocktype
import ../../logutils import ../../logutils
export payments, nitro const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
type BlockExcPeerCtx* = ref object of RootObj type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price blocks*: Table[BlockAddress, Presence] # remote peer have list
peerWants*: seq[WantListEntry] # remote peers want lists wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us refreshInProgress*: bool # indicates if a refresh is in progress
account*: ?Account # ethereum account of this peer lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
paymentChannel*: ?ChannelId # payment channel id refreshBackoff*: int = 1 # backoff factor for refresh requests
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] = proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
toSeq(self.blocks.keys) let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] = if staleness and self.refreshInProgress:
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] = staleness
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly an inefficient, but since those will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool = proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) = func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
self.blocks[presence.address] = presence self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) = func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@ -56,10 +99,35 @@ func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) = func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address]) self.cleanPresence(@[address])
func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 = proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
var price = 0.u256 ## Adds a block the set of blocks that have been requested to this peer
for a in addresses: ## (its request schedule).
self.blocks.withValue(a, precense): if self.blocksRequested.len == 0:
price += precense[].price self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
price proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested to this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -62,21 +62,23 @@ func len*(self: PeerCtxStore): int =
self.peers.len self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address)) toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid)) toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address)) toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) # FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[]) var res: PeersForBlock = (@[], @[])
for peer in self: for peer in self:
if peer.peerHave.anyIt(it == address): if address in peer:
res.with.add(peer) res.with.add(peer)
else: else:
res.without.add(peer) res.without.add(peer)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,7 +9,6 @@
import std/hashes import std/hashes
import std/sequtils import std/sequtils
import pkg/stew/endians2
import message import message
@ -18,14 +17,6 @@ import ../../blocktype
export Message, protobufEncode, protobufDecode export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash = proc hash*(e: WantListEntry): Hash =
hash(e.address) hash(e.address)

View File

@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes # Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages. # and Protobuf encoder/decoder for these messages.
# #
# Eventually all this code should be auto-generated from message.proto. # Eventually all this code should be auto-generated from message.proto.
@ -25,11 +25,15 @@ type
WantListEntry* = object WantListEntry* = object
address*: BlockAddress address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process those in the same order as we send them back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). default to 1 priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries entries*: seq[WantListEntry] # A list of wantList entries
@ -47,10 +51,6 @@ type
BlockPresence* = object BlockPresence* = object
address*: BlockAddress address*: BlockAddress
`type`*: BlockPresenceType `type`*: BlockPresenceType
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
AccountMessage* = object
address*: seq[byte] # Ethereum address to which payments should be made
StateChannelUpdate* = object StateChannelUpdate* = object
update*: seq[byte] # Signed Nitro state, serialized as JSON update*: seq[byte] # Signed Nitro state, serialized as JSON
@ -60,8 +60,6 @@ type
payload*: seq[BlockDelivery] payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence] blockPresences*: seq[BlockPresence]
pendingBytes*: uint pendingBytes*: uint
account*: AccountMessage
payment*: StateChannelUpdate
# #
# Encoding Message into seq[byte] in Protobuf format # Encoding Message into seq[byte] in Protobuf format
@ -111,19 +109,6 @@ proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer() var ipb = initProtoBuffer()
ipb.write(1, value.address) ipb.write(1, value.address)
ipb.write(2, value.`type`.uint) ipb.write(2, value.`type`.uint)
ipb.write(3, value.price)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: AccountMessage) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
var ipb = initProtoBuffer()
ipb.write(1, value.update)
ipb.finish() ipb.finish()
pb.write(field, ipb) pb.write(field, ipb)
@ -131,12 +116,10 @@ proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer() var ipb = initProtoBuffer()
ipb.write(1, value.wantList) ipb.write(1, value.wantList)
for v in value.payload: for v in value.payload:
ipb.write(3, v) ipb.write(3, v) # is this meant to be 2?
for v in value.blockPresences: for v in value.blockPresences:
ipb.write(4, v) ipb.write(4, v)
ipb.write(5, value.pendingBytes) ipb.write(5, value.pendingBytes)
ipb.write(6, value.account)
ipb.write(7, value.payment)
ipb.finish() ipb.finish()
ipb.buffer ipb.buffer
@ -236,19 +219,6 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
value.address = ?BlockAddress.decode(ipb) value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field): if ?pb.getField(2, field):
value.`type` = BlockPresenceType(field) value.`type` = BlockPresenceType(field)
discard ?pb.getField(3, value.price)
ok(value)
proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
var value = AccountMessage()
discard ?pb.getField(1, value.address)
ok(value)
proc decode*(
_: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
var value = StateChannelUpdate()
discard ?pb.getField(1, value.update)
ok(value) ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
@ -259,15 +229,11 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
sublist: seq[seq[byte]] sublist: seq[seq[byte]]
if ?pb.getField(1, ipb): if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb) value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist): if ?pb.getRepeatedField(3, sublist): # meant to be 2?
for item in sublist: for item in sublist:
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item))) value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist): if ?pb.getRepeatedField(4, sublist):
for item in sublist: for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item))) value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
discard ?pb.getField(5, value.pendingBytes) discard ?pb.getField(5, value.pendingBytes)
if ?pb.getField(6, ipb):
value.account = ?AccountMessage.decode(ipb)
if ?pb.getField(7, ipb):
value.payment = ?StateChannelUpdate.decode(ipb)
ok(value) ok(value)

View File

@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes. // Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md // Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3"; syntax = "proto3";
@ -38,21 +38,10 @@ message Message {
message BlockPresence { message BlockPresence {
bytes cid = 1; bytes cid = 1;
BlockPresenceType type = 2; BlockPresenceType type = 2;
bytes price = 3; // Amount of assets to pay for the block (UInt256)
}
message AccountMessage {
bytes address = 1; // Ethereum address to which payments should be made
}
message StateChannelUpdate {
bytes update = 1; // Signed Nitro state, serialized as JSON
} }
Wantlist wantlist = 1; Wantlist wantlist = 1;
repeated Block payload = 3; repeated Block payload = 3; // what happened to 2?
repeated BlockPresence blockPresences = 4; repeated BlockPresence blockPresences = 4;
int32 pendingBytes = 5; int32 pendingBytes = 5;
AccountMessage account = 6;
StateChannelUpdate payment = 7;
} }

View File

@ -1,38 +0,0 @@
{.push raises: [].}
import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import ./blockexc
export AccountMessage
export StateChannelUpdate
export stint
export nitro
type Account* = object
address*: EthAddress
func init*(_: type AccountMessage, account: Account): AccountMessage =
AccountMessage(address: @(account.address.toArray))
func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress =
var address: array[20, byte]
if bytes.len != address.len:
return EthAddress.none
for i in 0 ..< address.len:
address[i] = bytes[i]
EthAddress(address).some
func init*(_: type Account, message: AccountMessage): ?Account =
without address =? EthAddress.parse(message.address):
return none Account
some Account(address: address)
func init*(_: type StateChannelUpdate, state: SignedState): StateChannelUpdate =
StateChannelUpdate(update: state.toJson.toBytes)
proc init*(_: type SignedState, update: StateChannelUpdate): ?SignedState =
SignedState.fromJson(string.fromBytes(update.update))

View File

@ -17,7 +17,6 @@ type
Presence* = object Presence* = object
address*: BlockAddress address*: BlockAddress
have*: bool have*: bool
price*: UInt256
func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 = func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
if bytes.len > 32: if bytes.len > 32:
@ -25,18 +24,12 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
UInt256.fromBytesBE(bytes).some UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence = func init*(_: type Presence, message: PresenceMessage): ?Presence =
without price =? UInt256.parse(message.price):
return none Presence
some Presence( some Presence(
address: message.address, address: message.address, have: message.`type` == BlockPresenceType.Have
have: message.`type` == BlockPresenceType.Have,
price: price,
) )
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage = func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage( PresenceMessage(
address: presence.address, address: presence.address,
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave, `type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE),
) )

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,16 +9,14 @@
import std/tables import std/tables
import std/sugar import std/sugar
import std/hashes
export tables export tables
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p/[cid, multicodec, multihash] import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils import pkg/stew/[byteutils, endians2]
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
else: else:
"cid: " & $a.cid "cid: " & $a.cid
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid = proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid if a.leaf: a.treeCid else: a.cid

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# TODO: This is super inneficient and needs a rewrite, but it'll do for now # TODO: This is super inneficient and needs a rewrite, but it'll do for now
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -31,7 +28,7 @@ type
ChunkerError* = object of CatchableError ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte] ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {. Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError]) async: (raises: [ChunkerError, CancelledError])
.} .}
# Reader that splits input data into fixed-size chunks # Reader that splits input data into fixed-size chunks
@ -77,7 +74,7 @@ proc new*(
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var res = 0 var res = 0
try: try:
while res < len: while res < len:
@ -105,7 +102,7 @@ proc new*(
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var total = 0 var total = 0
try: try:
while total < len: while total < len:

View File

@ -1,6 +1,7 @@
{.push raises: [].}
import pkg/chronos import pkg/chronos
import pkg/stew/endians2 import pkg/stew/endians2
import pkg/upraises
import pkg/stint import pkg/stint
type type
@ -8,10 +9,12 @@ type
SecondsSince1970* = int64 SecondsSince1970* = int64
Timeout* = object of CatchableError Timeout* = object of CatchableError
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} = method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
raiseAssert "not implemented" raiseAssert "not implemented"
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} = method waitUntil*(
clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
raiseAssert "not implemented" raiseAssert "not implemented"
method start*(clock: Clock) {.base, async.} = method start*(clock: Clock) {.base, async.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,6 +12,7 @@ import std/strutils
import std/os import std/os
import std/tables import std/tables
import std/cpuinfo import std/cpuinfo
import std/net
import pkg/chronos import pkg/chronos
import pkg/taskpools import pkg/taskpools
@ -19,11 +20,8 @@ import pkg/presto
import pkg/libp2p import pkg/libp2p
import pkg/confutils import pkg/confutils
import pkg/confutils/defs import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2 import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2 import pkg/stew/io2
import ./node import ./node
@ -31,15 +29,10 @@ import ./conf
import ./rng as random import ./rng as random
import ./rest/api import ./rest/api
import ./stores import ./stores
import ./slots
import ./blockexchange import ./blockexchange
import ./utils/fileutils import ./utils/fileutils
import ./erasure
import ./discovery import ./discovery
import ./contracts
import ./systemclock import ./systemclock
import ./contracts/clock
import ./contracts/deployment
import ./utils/addrutils import ./utils/addrutils
import ./namespaces import ./namespaces
import ./codextypes import ./codextypes
@ -56,111 +49,28 @@ type
codexNode: CodexNodeRef codexNode: CodexNodeRef
repoStore: RepoStore repoStore: RepoStore
maintenance: BlockMaintainer maintenance: BlockMaintainer
taskpool: Taskpool
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
proc waitForSync(provider: Provider): Future[void] {.async.} = func config*(self: CodexServer): CodexConf =
var sleepTime = 1 return self.config
trace "Checking sync state of Ethereum provider..."
while await provider.isSyncing:
notice "Waiting for Ethereum provider to sync..."
await sleepAsync(sleepTime.seconds)
if sleepTime < 10:
inc sleepTime
trace "Ethereum provider is synced."
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = func node*(self: CodexServer): CodexNodeRef =
## bootstrap interactions and return contracts return self.codexNode
## using clients, hosts, validators pairings
##
let
config = s.config
repo = s.repoStore
if config.persistence: func repoStore*(self: CodexServer): RepoStore =
if not config.ethAccount.isSome and not config.ethPrivateKey.isSome: return self.repoStore
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure
let provider = JsonRpcProvider.new(
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
signer = provider.getSigner(account)
elif keyFile =? config.ethPrivateKey:
without isSecure =? checkSecureFile(keyFile):
error "Could not check file permissions: does Ethereum private key file exist?"
quit QuitFailure
if not isSecure:
error "Ethereum private key file does not have safe file permissions"
quit QuitFailure
without key =? keyFile.readAllChars():
error "Unable to read Ethereum private key file"
quit QuitFailure
without wallet =? EthWallet.new(key.strip(), provider):
error "Invalid Ethereum private key in file"
quit QuitFailure
signer = wallet
let deploy = Deployment.new(provider, config.marketplaceAddress)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure
let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(
marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
)
let clock = OnChainClock.new(provider)
var client: ?ClientInteractions
var host: ?HostInteractions
var validator: ?ValidatorInteractions
if config.validator or config.persistence:
s.codexNode.clock = clock
else:
s.codexNode.clock = SystemClock()
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
else:
let proofFailures = 0
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure
let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
host = some HostInteractions.new(clock, sales)
if config.validator:
without validationConfig =?
ValidationConfig.init(
config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
), err:
error "Invalid validation parameters", err = err.msg
quit QuitFailure
let validation = Validation.new(clock, market, validationConfig)
validator = some ValidatorInteractions.new(clock, validation)
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} = proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config if s.isStarted:
warn "Storage server already started, skipping"
return
trace "Starting Storage node", config = $s.config
await s.repoStore.start() await s.repoStore.start()
s.maintenance.start() s.maintenance.start()
await s.codexNode.switch.start() await s.codexNode.switch.start()
@ -172,26 +82,60 @@ proc start*(s: CodexServer) {.async.} =
s.codexNode.discovery.updateAnnounceRecord(announceAddrs) s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
s.codexNode.discovery.updateDhtRecord(discoveryAddrs) s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
await s.bootstrapInteractions()
await s.codexNode.start() await s.codexNode.start()
s.restServer.start()
if s.restServer != nil:
s.restServer.start()
s.isStarted = true
proc stop*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node" if not s.isStarted:
warn "Storage is not started"
return
let res = await noCancel allFinishedFailed[void]( notice "Stopping Storage node"
var futures =
@[ @[
s.restServer.stop(),
s.codexNode.switch.stop(), s.codexNode.switch.stop(),
s.codexNode.stop(), s.codexNode.stop(),
s.codexNode.discovery.stop(),
s.repoStore.stop(), s.repoStore.stop(),
s.maintenance.stop(), s.maintenance.stop(),
] ]
)
if s.restServer != nil:
futures.add(s.restServer.stop())
let res = await noCancel allFinishedFailed[void](futures)
s.isStarted = false
if res.failure.len > 0: if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop codex node" raiseAssert "Failed to stop Storage node"
proc close*(s: CodexServer) {.async.} =
var futures =
@[s.codexNode.close(), s.repoStore.close(), s.codexNode.discovery.close()]
let res = await noCancel allFinishedFailed[void](futures)
if not s.taskpool.isNil:
try:
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
proc shutdown*(server: CodexServer) {.async.} =
await server.stop()
await server.close()
proc new*( proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
@ -207,21 +151,21 @@ proc new*(
.withMaxConnections(config.maxPeers) .withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString) .withAgentVersion(config.agentString)
.withSignedPeerRecord(true) .withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr}) .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
.build() .build()
var var
cache: CacheStore = nil cache: CacheStore = nil
taskpool: Taskpool taskPool: Taskpool
try: try:
if config.numThreads == ThreadCount(0): if config.numThreads == ThreadCount(0):
taskpool = Taskpool.new(numThreads = min(countProcessors(), 16)) taskPool = Taskpool.new(numThreads = min(countProcessors(), 16))
else: else:
taskpool = Taskpool.new(numThreads = int(config.numThreads)) taskPool = Taskpool.new(numThreads = int(config.numThreads))
info "Threadpool started", numThreads = taskpool.numThreads info "Threadpool started", numThreads = taskPool.numThreads
except CatchableError as exc: except CatchableError as exc:
raiseAssert("Failure in taskpool initialization:" & exc.msg) raiseAssert("Failure in taskPool initialization:" & exc.msg)
if config.cacheSize > 0'nb: if config.cacheSize > 0'nb:
cache = CacheStore.new(cacheSize = config.cacheSize) cache = CacheStore.new(cacheSize = config.cacheSize)
@ -236,12 +180,15 @@ proc new*(
msg: "Unable to create discovery directory for block store: " & discoveryDir msg: "Unable to create discovery directory for block store: " & discoveryDir
) )
let providersPath = config.dataDir / CodexDhtProvidersNamespace
let discoveryStoreRes = LevelDbDatastore.new(providersPath)
if discoveryStoreRes.isErr:
error "Failed to initialize discovery datastore",
path = providersPath, err = discoveryStoreRes.error.msg
let let
discoveryStore = Datastore( discoveryStore =
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect( Datastore(discoveryStoreRes.expect("Should create discovery datastore!"))
"Should create discovery datastore!"
)
)
discovery = Discovery.new( discovery = Discovery.new(
switch.peerInfo.privateKey, switch.peerInfo.privateKey,
@ -251,7 +198,6 @@ proc new*(
store = discoveryStore, store = discoveryStore,
) )
wallet = WalletRef.new(EthPrivateKey.random())
network = BlockExcNetwork.new(switch) network = BlockExcNetwork.new(switch)
repoData = repoData =
@ -291,35 +237,30 @@ proc new*(
) )
peerStore = PeerCtxStore.new() peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new() pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery) advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery = blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new( engine = BlockExcEngine.new(
repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks repoStore, network, blockDiscovery, advertiser, peerStore, pendingBlocks
) )
store = NetworkStore.new(engine, repoStore) store = NetworkStore.new(engine, repoStore)
prover =
if config.prover:
let backend =
config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
else:
none Prover
codexNode = CodexNodeRef.new( codexNode = CodexNodeRef.new(
switch = switch, switch = switch,
networkStore = store, networkStore = store,
engine = engine, engine = engine,
discovery = discovery, discovery = discovery,
prover = prover, taskPool = taskPool,
taskPool = taskpool,
) )
var restServer: RestServerRef = nil
if config.apiBindAddress.isSome:
restServer = RestServerRef restServer = RestServerRef
.new( .new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort), initTAddress(config.apiBindAddress.get(), config.apiPort),
bufferSize = (1024 * 64), bufferSize = (1024 * 64),
maxRequestBodySize = int.high, maxRequestBodySize = int.high,
) )
@ -333,4 +274,5 @@ proc new*(
restServer: restServer, restServer: restServer,
repoStore: repoStore, repoStore: repoStore,
maintenance: maintenance, maintenance: maintenance,
taskPool: taskPool,
) )

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -26,34 +26,15 @@ export tables
const const
# Size of blocks for storage / network exchange, # Size of blocks for storage / network exchange,
DefaultBlockSize* = NBytes 1024 * 64 DefaultBlockSize* = NBytes 1024 * 64
DefaultCellSize* = NBytes 2048
# Proving defaults
DefaultMaxSlotDepth* = 32
DefaultMaxDatasetDepth* = 8
DefaultBlockDepth* = 5
DefaultCellElms* = 67
DefaultSamplesNum* = 5
# hashes # hashes
Sha256HashCodec* = multiCodec("sha2-256") Sha256HashCodec* = multiCodec("sha2-256")
Sha512HashCodec* = multiCodec("sha2-512")
Pos2Bn128SpngCodec* = multiCodec("poseidon2-alt_bn_128-sponge-r2")
Pos2Bn128MrklCodec* = multiCodec("poseidon2-alt_bn_128-merkle-2kb")
ManifestCodec* = multiCodec("codex-manifest") ManifestCodec* = multiCodec("codex-manifest")
DatasetRootCodec* = multiCodec("codex-root") DatasetRootCodec* = multiCodec("codex-root")
BlockCodec* = multiCodec("codex-block") BlockCodec* = multiCodec("codex-block")
SlotRootCodec* = multiCodec("codex-slot-root")
SlotProvingRootCodec* = multiCodec("codex-proving-root")
CodexSlotCellCodec* = multiCodec("codex-slot-cell")
CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec] CodexPrimitivesCodecs* = [ManifestCodec, DatasetRootCodec, BlockCodec]
CodexPrimitivesCodecs* = [
ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
CodexSlotCellCodec,
]
proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] = proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
## Initialize padding blocks table ## Initialize padding blocks table
@ -66,8 +47,7 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
let let
emptyData: seq[byte] = @[] emptyData: seq[byte] = @[]
PadHashes = { PadHashes = {
Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure, Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure
Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
}.toTable }.toTable
var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]() var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
{.pop.} {.pop.}
import std/options import std/options
import std/parseutils
import std/strutils import std/strutils
import std/typetraits import std/typetraits
import std/net
import pkg/chronos import pkg/chronos
import pkg/chronicles/helpers import pkg/chronicles/helpers
@ -27,13 +29,11 @@ import pkg/confutils/std/net
import pkg/toml_serialization import pkg/toml_serialization
import pkg/metrics import pkg/metrics
import pkg/metrics/chronos_httpserver import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils import pkg/stew/byteutils
import pkg/libp2p import pkg/libp2p
import pkg/ethers
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/base64
import ./codextypes import ./codextypes
import ./discovery import ./discovery
@ -44,15 +44,13 @@ import ./utils
import ./nat import ./nat
import ./utils/natutils import ./utils/natutils
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
from ./validationconfig import MaxSlots, ValidationGroups
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas DefaultBlockRetries
type ThreadCount* = distinct Natural type ThreadCount* = distinct Natural
@ -61,21 +59,18 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string = proc defaultDataDir*(): string =
let dataDir = let dataDir =
when defined(windows): when defined(windows):
"AppData" / "Roaming" / "Codex" "AppData" / "Roaming" / "Storage"
elif defined(macosx): elif defined(macosx):
"Library" / "Application Support" / "Codex" "Library" / "Application Support" / "Storage"
else: else:
".cache" / "codex" ".cache" / "storage"
getHomeDir() / dataDir getHomeDir() / dataDir
const const
codex_enable_api_debug_peers* {.booldefine.} = false storage_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false storage_enable_log_counter* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false
DefaultDataDir* = defaultDataDir()
DefaultCircuitDir* = defaultDataDir() / "circuits"
DefaultThreadCount* = ThreadCount(0) DefaultThreadCount* = ThreadCount(0)
type type
@ -83,10 +78,6 @@ type
noCmd noCmd
persistence persistence
PersistenceCmd* {.pure.} = enum
noCmd
prover
LogKind* {.pure.} = enum LogKind* {.pure.} = enum
Auto = "auto" Auto = "auto"
Colors = "colors" Colors = "colors"
@ -137,9 +128,9 @@ type
.}: Port .}: Port
dataDir* {. dataDir* {.
desc: "The directory where codex will store configuration and data", desc: "The directory where Storage will store configuration and data",
defaultValue: DefaultDataDir, defaultValue: defaultDataDir(),
defaultValueDesc: $DefaultDataDir, defaultValueDesc: "",
abbr: "d", abbr: "d",
name: "data-dir" name: "data-dir"
.}: OutDir .}: OutDir
@ -198,14 +189,16 @@ type
.}: ThreadCount .}: ThreadCount
agentString* {. agentString* {.
defaultValue: "Codex", defaultValue: "Logos Storage",
desc: "Node agent string which is used as identifier in network", desc: "Node agent string which is used as identifier in network",
name: "agent-string" name: "agent-string"
.}: string .}: string
apiBindAddress* {. apiBindAddress* {.
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr" desc: "The REST API bind address",
.}: string defaultValue: "127.0.0.1".some,
name: "api-bindaddr"
.}: Option[string]
apiPort* {. apiPort* {.
desc: "The REST Api port", desc: "The REST Api port",
@ -263,6 +256,13 @@ type
name: "block-mn" name: "block-mn"
.}: int .}: int
blockRetries* {.
desc: "Number of times to retry fetching a block before giving up",
defaultValue: DefaultBlockRetries,
defaultValueDesc: $DefaultBlockRetries,
name: "block-retries"
.}: int
cacheSize* {. cacheSize* {.
desc: desc:
"The size of the block cache, 0 disables the cache - " & "The size of the block cache, 0 disables the cache - " &
@ -277,206 +277,14 @@ type
desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden
.}: Option[string] .}: Option[string]
case cmd* {.defaultValue: noCmd, command.}: StartUpCmd
of persistence:
ethProvider* {.
desc: "The URL of the JSON-RPC API of the Ethereum node",
defaultValue: "ws://localhost:8545",
name: "eth-provider"
.}: string
ethAccount* {.
desc: "The Ethereum account that is used for storage contracts",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "eth-account"
.}: Option[EthAddress]
ethPrivateKey* {.
desc: "File containing Ethereum private key for storage contracts",
defaultValue: string.none,
defaultValueDesc: "",
name: "eth-private-key"
.}: Option[string]
marketplaceAddress* {.
desc: "Address of deployed Marketplace contract",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "marketplace-address"
.}: Option[EthAddress]
# TODO: should go behind a feature flag
simulateProofFailures* {.
desc: "Simulates proof failures once every N proofs. 0 = disabled.",
defaultValue: 0,
name: "simulate-proof-failures",
hidden
.}: int
validator* {.
desc: "Enables validator, requires an Ethereum node",
defaultValue: false,
name: "validator"
.}: bool
validatorMaxSlots* {.
desc: "Maximum number of slots that the validator monitors",
longDesc:
"If set to 0, the validator will not limit " &
"the maximum number of slots it monitors",
defaultValue: 1000,
name: "validator-max-slots"
.}: MaxSlots
validatorGroups* {.
desc: "Slot validation groups",
longDesc:
"A number indicating total number of groups into " &
"which the whole slot id space will be divided. " &
"The value must be in the range [2, 65535]. " &
"If not provided, the validator will observe " &
"the whole slot id space and the value of " &
"the --validator-group-index parameter will be ignored. " &
"Powers of twos are advised for even distribution",
defaultValue: ValidationGroups.none,
name: "validator-groups"
.}: Option[ValidationGroups]
validatorGroupIndex* {.
desc: "Slot validation group index",
longDesc:
"The value provided must be in the range " &
"[0, validatorGroups). Ignored when --validator-groups " &
"is not provided. Only slot ids satisfying condition " &
"[(slotId mod validationGroups) == groupIndex] will be " &
"observed by the validator",
defaultValue: 0,
name: "validator-group-index"
.}: uint16
rewardRecipient* {.
desc: "Address to send payouts to (eg rewards and refunds)",
name: "reward-recipient"
.}: Option[EthAddress]
marketplaceRequestCacheSize* {.
desc:
"Maximum number of StorageRequests kept in memory." &
"Reduces fetching of StorageRequest data from the contract.",
defaultValue: DefaultRequestCacheSize,
defaultValueDesc: $DefaultRequestCacheSize,
name: "request-cache-size",
hidden
.}: uint16
maxPriorityFeePerGas* {.
desc:
"Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
defaultValue: DefaultMaxPriorityFeePerGas,
defaultValueDesc: $DefaultMaxPriorityFeePerGas,
name: "max-priority-fee-per-gas",
hidden
.}: uint64
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Codex will store proof circuit data",
defaultValue: DefaultCircuitDir,
defaultValueDesc: $DefaultCircuitDir,
abbr: "cd",
name: "circuit-dir"
.}: OutDir
circomR1cs* {.
desc: "The r1cs file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.r1cs",
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs",
name: "circom-r1cs"
.}: InputFile
circomWasm* {.
desc: "The wasm file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.wasm",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm",
name: "circom-wasm"
.}: InputFile
circomZkey* {.
desc: "The zkey file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.zkey",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey",
name: "circom-zkey"
.}: InputFile
# TODO: should probably be hidden and behind a feature flag
circomNoZkey* {.
desc: "Ignore the zkey file - use only for testing!",
defaultValue: false,
name: "circom-no-zkey"
.}: bool
numProofSamples* {.
desc: "Number of samples to prove",
defaultValue: DefaultSamplesNum,
defaultValueDesc: $DefaultSamplesNum,
name: "proof-samples"
.}: int
maxSlotDepth* {.
desc: "The maximum depth of the slot tree",
defaultValue: DefaultMaxSlotDepth,
defaultValueDesc: $DefaultMaxSlotDepth,
name: "max-slot-depth"
.}: int
maxDatasetDepth* {.
desc: "The maximum depth of the dataset tree",
defaultValue: DefaultMaxDatasetDepth,
defaultValueDesc: $DefaultMaxDatasetDepth,
name: "max-dataset-depth"
.}: int
maxBlockDepth* {.
desc: "The maximum depth of the network block merkle tree",
defaultValue: DefaultBlockDepth,
defaultValueDesc: $DefaultBlockDepth,
name: "max-block-depth"
.}: int
maxCellElms* {.
desc: "The maximum number of elements in a cell",
defaultValue: DefaultCellElms,
defaultValueDesc: $DefaultCellElms,
name: "max-cell-elements"
.}: int
of PersistenceCmd.noCmd:
discard
of StartUpCmd.noCmd:
discard # end of persistence
EthAddress* = ethers.Address
logutils.formatIt(LogFormat.textLines, EthAddress):
it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress):
%it
func defaultAddress*(conf: CodexConf): IpAddress = func defaultAddress*(conf: CodexConf): IpAddress =
result = static parseIpAddress("127.0.0.1") result = static parseIpAddress("127.0.0.1")
func defaultNatConfig*(): NatConfig = func defaultNatConfig*(): NatConfig =
result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
func persistence*(self: CodexConf): bool =
self.cmd == StartUpCmd.persistence
func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string = proc getCodexVersion(): string =
let tag = strip(staticExec("git tag")) let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace: if tag.isEmptyOrWhitespace:
return "untagged build" return "untagged build"
return tag return tag
@ -486,109 +294,120 @@ proc getCodexRevision(): string =
var res = strip(staticExec("git rev-parse --short HEAD")) var res = strip(staticExec("git rev-parse --short HEAD"))
return res return res
proc getCodexContractsRevision(): string =
let res = strip(staticExec("git rev-parse --short HEAD:vendor/codex-contracts-eth"))
return res
proc getNimBanner(): string = proc getNimBanner(): string =
staticExec("nim --version | grep Version") staticExec("nim --version | grep Version")
const const
codexVersion* = getCodexVersion() codexVersion* = getCodexVersion()
codexRevision* = getCodexRevision() codexRevision* = getCodexRevision()
codexContractsRevision* = getCodexContractsRevision()
nimBanner* = getNimBanner() nimBanner* = getNimBanner()
codexFullVersion* = codexFullVersion* =
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" & "Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"Codex contracts revision: " & codexContractsRevision & "\p" & nimBanner "\p"
proc parseCmdArg*( proc parseCmdArg*(
T: typedesc[MultiAddress], input: string T: typedesc[MultiAddress], input: string
): MultiAddress {.upraises: [ValueError].} = ): MultiAddress {.raises: [ValueError].} =
var ma: MultiAddress var ma: MultiAddress
try: try:
let res = MultiAddress.init(input) let res = MultiAddress.init(input)
if res.isOk: if res.isOk:
ma = res.get() ma = res.get()
else: else:
warn "Invalid MultiAddress", input = input, error = res.error() fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure quit QuitFailure
except LPError as exc: except LPError as exc:
warn "Invalid MultiAddress uri", uri = input, error = exc.msg fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
quit QuitFailure quit QuitFailure
ma ma
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
let count = parseInt(input) try:
if count != 0 and count < 2: let count = parseInt(p)
warn "Invalid number of threads", input = input if count != 0 and count < 2:
quit QuitFailure return err("Invalid number of threads: " & p)
ThreadCount(count) return ok(ThreadCount(count))
except ValueError as e:
return err("Invalid number of threads: " & p & ", error=" & e.msg)
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = proc parseCmdArg*(T: type ThreadCount, input: string): T =
let val = ThreadCount.parse(input)
if val.isErr:
fatal "Cannot parse the thread count.", input = input, error = val.error()
quit QuitFailure
return val.get()
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
var res: SignedPeerRecord var res: SignedPeerRecord
try: try:
if not res.fromURI(uri): if not res.fromURI(p):
warn "Invalid SignedPeerRecord uri", uri = uri return err("The uri is not a valid SignedPeerRecord: " & p)
quit QuitFailure return ok(res)
except LPError as exc: except LPError, Base64Error:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg let e = getCurrentException()
quit QuitFailure return err(e.msg)
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
res
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} = proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
let res = SignedPeerRecord.parse(uri)
if res.isErr:
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
quit QuitFailure
return res.get()
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
case p.toLowerAscii case p.toLowerAscii
of "any": of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
of "none": of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
of "upnp": of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
of "pmp": of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
else: else:
if p.startsWith("extip:"): if p.startsWith("extip:"):
try: try:
let ip = parseIpAddress(p[6 ..^ 1]) let ip = parseIpAddress(p[6 ..^ 1])
NatConfig(hasExtIp: true, extIp: ip) return ok(NatConfig(hasExtIp: true, extIp: ip))
except ValueError: except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1] let error = "Not a valid IP address: " & p[6 ..^ 1]
raise newException(ValueError, error) return err(error)
else: else:
let error = "Not a valid NAT option: " & p return err("Not a valid NAT option: " & p)
raise newException(ValueError, error)
proc parseCmdArg*(T: type NatConfig, p: string): T =
let res = NatConfig.parse(p)
if res.isErr:
fatal "Cannot parse the NAT config.", error = res.error(), input = p
quit QuitFailure
return res.get()
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] = proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[] return @[]
proc parseCmdArg*(T: type EthAddress, address: string): T = func parse*(T: type NBytes, p: string): Result[NBytes, string] =
EthAddress.init($address).get() var num = 0'i64
let count = parseSize(p, num, alwaysBin = true)
if count == 0:
return err("Invalid number of bytes: " & p)
return ok(NBytes(num))
proc parseCmdArg*(T: type NBytes, val: string): T = proc parseCmdArg*(T: type NBytes, val: string): T =
var num = 0'i64 let res = NBytes.parse(val)
let count = parseSize(val, num, alwaysBin = true) if res.isErr:
if count == 0: fatal "Cannot parse NBytes.", error = res.error(), input = val
warn "Invalid number of bytes", nbytes = val
quit QuitFailure quit QuitFailure
NBytes(num) return res.get()
proc parseCmdArg*(T: type Duration, val: string): T = proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration var dur: Duration
let count = parseDuration(val, dur) let count = parseDuration(val, dur)
if count == 0: if count == 0:
warn "Cannot parse duration", dur = dur fatal "Cannot parse duration", dur = dur
quit QuitFailure quit QuitFailure
dur dur
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.upraises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
without uri =? r.readValue(string).catch, err: without uri =? r.readValue(string).catch, err:
error "invalid SignedPeerRecord configuration value", error = err.msg error "invalid SignedPeerRecord configuration value", error = err.msg
@ -597,7 +416,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
try: try:
val = SignedPeerRecord.parseCmdArg(uri) val = SignedPeerRecord.parseCmdArg(uri)
except LPError as err: except LPError as err:
warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
quit QuitFailure quit QuitFailure
proc readValue*(r: var TomlReader, val: var MultiAddress) = proc readValue*(r: var TomlReader, val: var MultiAddress) =
@ -609,12 +428,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk: if res.isOk:
val = res.get() val = res.get()
else: else:
warn "Invalid MultiAddress", input = input, error = res.error() fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure quit QuitFailure
proc readValue*( proc readValue*(
r: var TomlReader, val: var NBytes r: var TomlReader, val: var NBytes
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
var value = 0'i64 var value = 0'i64
var str = r.readValue(string) var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true) let count = parseSize(str, value, alwaysBin = true)
@ -625,7 +444,7 @@ proc readValue*(
proc readValue*( proc readValue*(
r: var TomlReader, val: var ThreadCount r: var TomlReader, val: var ThreadCount
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string) var str = r.readValue(string)
try: try:
val = parseCmdArg(ThreadCount, str) val = parseCmdArg(ThreadCount, str)
@ -634,7 +453,7 @@ proc readValue*(
proc readValue*( proc readValue*(
r: var TomlReader, val: var Duration r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string) var str = r.readValue(string)
var dur: Duration var dur: Duration
let count = parseDuration(str, dur) let count = parseDuration(str, dur)
@ -653,9 +472,6 @@ proc readValue*(
raise newException(SerializationError, err.msg) raise newException(SerializationError, err.msg)
# no idea why confutils needs this: # no idea why confutils needs this:
proc completeCmdArg*(T: type EthAddress, val: string): seq[string] =
discard
proc completeCmdArg*(T: type NBytes, val: string): seq[string] = proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
discard discard
@ -701,7 +517,7 @@ proc stripAnsi*(v: string): string =
res res
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
# Updates log levels (without clearing old ones) # Updates log levels (without clearing old ones)
let directives = logLevel.split(";") let directives = logLevel.split(";")
try: try:
@ -770,7 +586,7 @@ proc setupLogging*(conf: CodexConf) =
of LogKind.None: of LogKind.None:
noOutput noOutput
when codex_enable_log_counter: when storage_enable_log_counter:
var counter = 0.uint64 var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) = proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter) inc(counter)
@ -781,15 +597,6 @@ proc setupLogging*(conf: CodexConf) =
else: else:
defaultChroniclesStream.outputs[0].writer = writer defaultChroniclesStream.outputs[0].writer = writer
try:
updateLogLevel(conf.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
proc setupMetrics*(config: CodexConf) = proc setupMetrics*(config: CodexConf) =
if config.metricsEnabled: if config.metricsEnabled:
let metricsAddress = config.metricsAddress let metricsAddress = config.metricsAddress

View File

@ -0,0 +1,2 @@
const ContentIdsExts =
[multiCodec("codex-root"), multiCodec("codex-manifest"), multiCodec("codex-block")]

View File

@ -1,11 +0,0 @@
import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
import contracts/provider
export requests
export marketplace
export market
export interactions
export provider

View File

@ -1,148 +0,0 @@
Codex Contracts in Nim
=======================
Nim API for the [Codex smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
Smart contract
--------------
Connecting to the smart contract on an Ethereum node:
```nim
import codex/contracts
import ethers
let address = # fill in address where the contract was deployed
let provider = JsonRpcProvider.new("ws://localhost:8545")
let marketplace = Marketplace.new(address, provider)
```
Setup client and host so that they can sign transactions; here we use the first
two accounts on the Ethereum node:
```nim
let accounts = await provider.listAccounts()
let client = provider.getSigner(accounts[0])
let host = provider.getSigner(accounts[1])
```
Storage requests
----------------
Creating a request for storage:
```nim
let request : StorageRequest = (
client: # address of the client requesting storage
duration: # duration of the contract in seconds
size: # size in bytes
contentHash: # SHA256 hash of the content that's going to be stored
proofProbability: # require a storage proof roughly once every N periods
maxPrice: # maximum price the client is willing to pay
expiry: # expiration time of the request (in unix time)
nonce: # random nonce to differentiate between similar requests
)
```
When a client wants to submit this request to the network, it needs to pay the
maximum price to the smart contract in advance. The difference between the
maximum price and the offered price will be reimbursed later.
Once the payment has been prepared, the client can submit the request to the
network:
```nim
await storage
.connect(client)
.requestStorage(request)
```
Storage offers
--------------
Creating a storage offer:
```nim
let offer: StorageOffer = (
host: # address of the host that is offering storage
requestId: request.id,
price: # offered price (in number of tokens)
expiry: # expiration time of the offer (in unix time)
)
```
Hosts submits an offer:
```nim
await storage
.connect(host)
.offerStorage(offer)
```
Client selects an offer:
```nim
await storage
.connect(client)
.selectOffer(offer.id)
```
Starting and finishing a storage contract
-----------------------------------------
The host whose offer got selected can start the storage contract once it
received the data that needs to be stored:
```nim
await storage
.connect(host)
.startContract(offer.id)
```
Once the storage contract is finished, the host can release payment:
```nim
await storage
.connect(host)
.finishContract(id)
```
Storage proofs
--------------
Time is divided into periods, and each period a storage proof may be required
from the host. The odds of requiring a storage proof are negotiated through the
storage request. For more details about the timing of storage proofs, please
refer to the [design document][2].
At the start of each period of time, the host can check whether a storage proof
is required:
```nim
let isProofRequired = await storage.isProofRequired(offer.id)
```
If a proof is required, the host can submit it before the end of the period:
```nim
await storage
.connect(host)
.submitProof(id, proof)
```
If a proof is not submitted, then a validator can mark a proof as missing:
```nim
await storage
.connect(validator)
.markProofAsMissing(id, period)
```
[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md

View File

@ -1,78 +0,0 @@
import std/times
import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures
export clock
logScope:
topics = "contracts clock"
type OnChainClock* = ref object of Clock
provider: Provider
subscription: Subscription
offset: times.Duration
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
let blockTime = initTime(blck.timestamp.truncate(int64), 0)
let computerTime = getTime()
clock.offset = blockTime - computerTime
clock.blockNumber = number
trace "updated clock",
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CatchableError as error:
debug "error updating clock: ", error = error.msg
method start*(clock: OnChainClock) {.async.} =
if clock.started:
return
proc onBlock(blckResult: ?!Block) =
if eventError =? blckResult.errorOption:
error "There was an error in block subscription", msg = eventError.msg
return
# ignore block parameter; hardhat may call this with pending blocks
clock.trackedFutures.track(clock.update())
await clock.update()
clock.subscription = await clock.provider.subscribe(onBlock)
clock.started = true
method stop*(clock: OnChainClock) {.async.} =
if not clock.started:
return
await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

View File

@ -1,104 +0,0 @@
import pkg/contractabi
import pkg/ethers/contracts/fields
import pkg/questionable/results
export contractabi
const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64
type
MarketplaceConfig* = object
collateral*: CollateralConfig
proofs*: ProofConfig
reservations*: SlotReservationsConfig
requestDurationLimit*: uint64
CollateralConfig* = object
repairRewardPercentage*: uint8
# percentage of remaining collateral slot has after it has been freed
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
slashPercentage*: uint8 # percentage of the collateral that is slashed
validatorRewardPercentage*: uint8
# percentage of the slashed amount going to the validators
ProofConfig* = object
period*: uint64 # proofs requirements are calculated per period (in seconds)
timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
downtime*: uint8 # ignore this much recent blocks for proof requirements
downtimeProduct*: uint8
zkeyHash*: string # hash of the zkey file which is linked to the verifier
# Ensures the pointer does not remain in downtime for many consecutive
# periods. For each period increase, move the pointer `pointerProduct`
# blocks. Should be a prime number to ensure there are no cycles.
SlotReservationsConfig* = object
maxReservations*: uint8
func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
ProofConfig(
period: tupl[0],
timeout: tupl[1],
downtime: tupl[2],
downtimeProduct: tupl[3],
zkeyHash: tupl[4],
)
func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
SlotReservationsConfig(maxReservations: tupl[0])
func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
CollateralConfig(
repairRewardPercentage: tupl[0],
maxNumberOfSlashes: tupl[1],
slashPercentage: tupl[2],
validatorRewardPercentage: tupl[3],
)
func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
MarketplaceConfig(
collateral: tupl[0],
proofs: tupl[1],
reservations: tupl[2],
requestDurationLimit: tupl[3],
)
func solidityType*(_: type SlotReservationsConfig): string =
solidityType(SlotReservationsConfig.fieldTypes)
func solidityType*(_: type ProofConfig): string =
solidityType(ProofConfig.fieldTypes)
func solidityType*(_: type CollateralConfig): string =
solidityType(CollateralConfig.fieldTypes)
func solidityType*(_: type MarketplaceConfig): string =
solidityType(MarketplaceConfig.fieldTypes)
func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: CollateralConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
let tupl = ?decoder.read(ProofConfig.fieldTypes)
success ProofConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
success SlotReservationsConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
let tupl = ?decoder.read(CollateralConfig.fieldTypes)
success CollateralConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T =
let tupl = ?decoder.read(MarketplaceConfig.fieldTypes)
success MarketplaceConfig.fromTuple(tupl)

View File

@ -1,48 +0,0 @@
import std/os
import std/tables
import pkg/ethers
import pkg/questionable
import ../conf
import ../logutils
import ./marketplace
type Deployment* = ref object
provider: Provider
marketplaceAddressOverride: ?Address
const knownAddresses = {
# Hardhat localhost network
"31337":
{"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Jun 11 2025 17:04:56 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0xd53a4181862f42641ccA02Fb4CED7D7f19C6920B")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
let id = chainId.toString(10)
notice "Looking for well-known contract address with ChainID ", chainId = id
if not (id in knownAddresses):
return none Address
return knownAddresses[id].getOrDefault($T, Address.none)
proc new*(
_: type Deployment,
provider: Provider,
marketplaceAddressOverride: ?Address = none Address,
): Deployment =
Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
when contract is Marketplace:
if address =? deployment.marketplaceAddressOverride:
return some address
let chainId = await deployment.provider.getChainId()
return contract.getKnownAddress(chainId)

View File

@ -1,9 +0,0 @@
import ./interactions/interactions
import ./interactions/hostinteractions
import ./interactions/clientinteractions
import ./interactions/validatorinteractions
export interactions
export hostinteractions
export clientinteractions
export validatorinteractions

View File

@ -1,26 +0,0 @@
import pkg/ethers
import ../../purchasing
import ../../logutils
import ../market
import ../clock
import ./interactions
export purchasing
export logutils
type ClientInteractions* = ref object of ContractInteractions
purchasing*: Purchasing
proc new*(
_: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
ClientInteractions(clock: clock, purchasing: purchasing)
proc start*(self: ClientInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.purchasing.start()
proc stop*(self: ClientInteractions) {.async.} =
await self.purchasing.stop()
await procCall ContractInteractions(self).stop()

View File

@ -1,24 +0,0 @@
import pkg/chronos
import ../../logutils
import ../../sales
import ./interactions
export sales
export logutils
type HostInteractions* = ref object of ContractInteractions
sales*: Sales
proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
## Create a new HostInteractions instance
##
HostInteractions(clock: clock, sales: sales)
method start*(self: HostInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.sales.start()
method stop*(self: HostInteractions) {.async.} =
await self.sales.stop()
await procCall ContractInteractions(self).start()

View File

@ -1,15 +0,0 @@
import pkg/ethers
import ../clock
import ../marketplace
import ../market
export clock
type ContractInteractions* = ref object of RootObj
clock*: Clock
method start*(self: ContractInteractions) {.async, base.} =
discard
method stop*(self: ContractInteractions) {.async, base.} =
discard

View File

@ -1,20 +0,0 @@
import ./interactions
import ../../validation
export validation
type ValidatorInteractions* = ref object of ContractInteractions
validation: Validation
proc new*(
_: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
ValidatorInteractions(clock: clock, validation: validation)
proc start*(self: ValidatorInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.validation.start()
proc stop*(self: ValidatorInteractions) {.async.} =
await self.validation.stop()
await procCall ContractInteractions(self).stop()

View File

@ -1,667 +0,0 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
import ../logutils
import ../market
import ./marketplace
import ./proofs
import ./provider
export market
logScope:
topics = "marketplace onchain market"
type
OnChainMarket* = ref object of Market
contract: Marketplace
signer: Signer
rewardRecipient: ?Address
configuration: ?MarketplaceConfig
requestCache: LruCache[string, StorageRequest]
allowanceLock: AsyncLock
MarketSubscription = market.Subscription
EventSubscription = ethers.Subscription
OnChainMarketSubscription = ref object of MarketSubscription
eventSubscription: EventSubscription
func new*(
_: type OnChainMarket,
contract: Marketplace,
rewardRecipient = Address.none,
requestCacheSize: uint16 = DefaultRequestCacheSize,
): OnChainMarket =
without signer =? contract.signer:
raiseAssert("Marketplace contract should have a signer")
var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))
OnChainMarket(
contract: contract,
signer: signer,
rewardRecipient: rewardRecipient,
requestCache: requestCache,
)
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail.prefixWith(msg))
proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration:
if err =? (await market.loadConfig()).errorOption:
raiseMarketError(err.msg)
without config =? market.configuration:
raiseMarketError("Failed to access to config from the Marketplace contract")
return config
return resolvedConfig
template withAllowanceLock*(market: OnChainMarket, body: untyped) =
if market.allowanceLock.isNil:
market.allowanceLock = newAsyncLock()
await market.allowanceLock.acquire()
try:
body
finally:
try:
market.allowanceLock.release()
except AsyncLockError as error:
raise newException(Defect, error.msg, error)
proc approveFunds(
market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
debug "Approving tokens", amount
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
let owner = await market.signer.getAddress()
let spender = market.contract.address
market.withAllowanceLock:
let allowance = await token.allowance(owner, spender)
discard await token.approve(spender, allowance + amount).confirm(1)
method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)
method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config()
return some config.proofs.zkeyHash
method getSigner*(
market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit
method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(
market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)
method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
try:
let key = $id
if key in market.requestCache:
return some market.requestCache[key]
let request = await market.contract.getRequest(id)
market.requestCache[key] = request
return some request
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError("Failed to get request state"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides)
except Marketplace_UnknownRequest:
return none RequestState
method slotState*(
market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id)
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id)
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
if address != Address.default:
return some address
else:
return none Address
method currentCollateral*(
market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError("Failed to get active slot"):
try:
return some await market.contract.getActiveSlot(slotId)
except Marketplace_SlotIsFree:
return none Slot
method fillSlot(
market: OnChainMarket,
requestId: RequestId,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fill slot"):
logScope:
requestId
slotIndex
try:
await market.approveFunds(collateral)
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
trace "calling fillSlot on contract"
discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
method freeSlot*(
market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to free slot"):
try:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient
)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient, # SP's address
overrides,
)
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
freeSlot = market.contract.freeSlot(slotId, overrides)
discard await freeSlot.confirm(1)
except Marketplace_SlotIsFree as parent:
raise newException(
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
)
method withdrawFunds(
market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
except Marketplace_SlotIsFree:
return false
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
except Marketplace_SlotIsFree:
return false
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
try:
discard await market.contract.submitProof(id, proof).confirm(1)
except Proofs_InvalidProof as parent:
raise newException(
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
)
method markProofAsMissing*(
    market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} =
  ## Marks the proof for slot `id` in `period` as missing on-chain and
  ## waits for one confirmation. The gas limit is padded beyond the
  ## estimate because the transaction can take a more expensive code path
  ## than the one used for estimation.
  convertEthersError("Failed to mark proof as missing"):
    # Add 10% to gas estimate to deal with different evm code flow when we
    # happen to be the one to make the request fail
    let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
    let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
    discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
method canMarkProofAsMissing*(
    market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async: (raises: [CancelledError]).} =
  ## Dry-runs `canMarkProofAsMissing` against the pending block. Returns
  ## true when the call succeeds; any EthersError (e.g. a revert) is
  ## logged at trace level and mapped to false rather than raised.
  try:
    let overrides = CallOverrides(blockTag: some BlockTag.pending)
    discard await market.contract.canMarkProofAsMissing(id, period, overrides)
    return true
  except EthersError as e:
    trace "Proof cannot be marked as missing", msg = e.msg
    return false
method reserveSlot*(
    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async: (raises: [CancelledError, MarketError]).} =
  ## Reserves slot `slotIndex` of request `requestId` on-chain and waits
  ## for one confirmation. A contract-side rejection is re-raised as
  ## SlotReservationNotAllowedError with the original error chained as
  ## parent (consistent with submitProof/freeSlot error handling — the
  ## original code dropped the cause).
  convertEthersError("Failed to reserve slot"):
    try:
      # Add 10% to gas estimate to deal with different evm code flow when we
      # happen to be the last one that is allowed to reserve the slot
      let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
      let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
      discard
        await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
    except SlotReservations_ReservationNotAllowed as parent:
      raise newException(
        SlotReservationNotAllowedError,
        "Failed to reserve slot because reservation is not allowed",
        parent,
      )
method canReserveSlot*(
    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
  ## Asks the contract whether this host may still reserve slot
  ## `slotIndex` of request `requestId`.
  convertEthersError("Unable to determine if slot can be reserved"):
    return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(
    market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
  ## Subscribes to StorageRequested contract events, invoking `callback`
  ## with the request id, ask and expiry of each event. Failed event
  ## decodings are logged and dropped.
  proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in Request subscription", msg = eventErr.msg
      return
    callback(event.requestId, event.ask, event.expiry)

  convertEthersError("Failed to subscribe to StorageRequested events"):
    let subscription = await market.contract.subscribe(StorageRequested, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
    market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
  ## Subscribes to all SlotFilled events; `callback` receives the request
  ## id and slot index. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in SlotFilled subscription", msg = eventErr.msg
      return
    callback(event.requestId, event.slotIndex)

  convertEthersError("Failed to subscribe to SlotFilled events"):
    let subscription = await market.contract.subscribe(SlotFilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
    market: OnChainMarket,
    requestId: RequestId,
    slotIndex: uint64,
    callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
  ## Filtered variant: delegates to the unfiltered SlotFilled subscription
  ## and only forwards events matching `requestId` and `slotIndex`.
  proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
    if eventRequestId == requestId and eventSlotIndex == slotIndex:
      callback(requestId, slotIndex)

  convertEthersError("Failed to subscribe to SlotFilled events"):
    return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(
    market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
  ## Subscribes to SlotFreed events; `callback` receives the request id
  ## and slot index. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in SlotFreed subscription", msg = eventErr.msg
      return
    callback(event.requestId, event.slotIndex)

  convertEthersError("Failed to subscribe to SlotFreed events"):
    let subscription = await market.contract.subscribe(SlotFreed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotReservationsFull*(
    market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
  ## Subscribes to SlotReservationsFull events (a slot's reservation list
  ## has filled up); `callback` receives the request id and slot index.
  ## Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in SlotReservationsFull subscription",
        msg = eventErr.msg
      return
    callback(event.requestId, event.slotIndex)

  convertEthersError("Failed to subscribe to SlotReservationsFull events"):
    let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
    market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
  ## Subscribes to RequestFulfilled events for all requests; `callback`
  ## receives the request id. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
      return
    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFulfilled events"):
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
    market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
  ## Filtered variant: only events whose request id equals `requestId`
  ## reach `callback`. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
      return
    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFulfilled events"):
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
    market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
  ## Subscribes to RequestCancelled events for all requests; `callback`
  ## receives the request id. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return
    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestCancelled events"):
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
    market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
  ## Filtered variant: only events whose request id equals `requestId`
  ## reach `callback`. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return
    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestCancelled events"):
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
    market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
  ## Subscribes to RequestFailed events for all requests; `callback`
  ## receives the request id. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return
    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFailed events"):
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
    market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
  ## Filtered variant: only events whose request id equals `requestId`
  ## reach `callback`. Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return
    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFailed events"):
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeProofSubmission*(
    market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
  ## Subscribes to ProofSubmitted events; `callback` receives the slot id.
  ## Failed event decodings are logged and dropped.
  proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
      return
    callback(event.id)

  convertEthersError("Failed to subscribe to ProofSubmitted events"):
    let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
  ## Cancels the underlying contract event subscription.
  await subscription.eventSubscription.unsubscribe()
method queryPastSlotFilledEvents*(
    market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
  ## Returns all SlotFilled events emitted between `fromBlock` and the
  ## latest block (inclusive).
  convertEthersError("Failed to get past SlotFilled events from block"):
    return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*(
    market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
  ## Convenience overload: resolves `blocksAgo` to a block tag relative to
  ## the chain head, then delegates to the BlockTag overload.
  convertEthersError("Failed to get past SlotFilled events"):
    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
    return await market.queryPastSlotFilledEvents(fromBlock)
method queryPastSlotFilledEvents*(
    market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
  ## Convenience overload: resolves the epoch time `fromTime` to a block
  ## number, then delegates to the BlockTag overload.
  convertEthersError("Failed to get past SlotFilled events from time"):
    let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
    return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
    market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
  ## Returns all StorageRequested events emitted between `fromBlock` and
  ## the latest block (inclusive).
  convertEthersError("Failed to get past StorageRequested events from block"):
    return
      await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
    market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
  ## Convenience overload: resolves `blocksAgo` to a block tag relative to
  ## the chain head, then delegates to the BlockTag overload.
  convertEthersError("Failed to get past StorageRequested events"):
    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
    return await market.queryPastStorageRequestedEvents(fromBlock)
method slotCollateral*(
    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
  ## Computes the collateral required for the given slot, taking the
  ## slot's current on-chain state into account (slots in repair get a
  ## discount — see the state-based overload). Returns failure when the
  ## request cannot be fetched or a MarketError occurs along the way.
  let slotid = slotId(requestId, slotIndex)
  try:
    let slotState = await market.slotState(slotid)

    without request =? await market.getRequest(requestId):
      return failure newException(
        MarketError, "Failure calculating the slotCollateral, cannot get the request"
      )

    return market.slotCollateral(request.ask.collateralPerSlot, slotState)
  except MarketError as err:
    # Named `err` (not `error`) so the chronicles `error` logging template
    # used below is not shadowed by the exception variable.
    error "Error when trying to calculate the slotCollateral", error = err.msg
    return failure err
method slotCollateral*(
    market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
  ## Returns the collateral required for a slot given its state. Slots in
  ## repair are discounted by the configured repair reward percentage;
  ## every other state pays the full `collateralPerSlot`. Fails when the
  ## repair percentage cannot be read from the cached configuration.
  if slotState != SlotState.Repair:
    return success(collateralPerSlot)

  without repairRewardPercentage =?
    market.configuration .? collateral .? repairRewardPercentage:
    return failure newException(
      MarketError,
      "Failure calculating the slotCollateral, cannot get the reward percentage",
    )

  # Repair reward is carved out of the collateral: subtract the percentage.
  let discount = (collateralPerSlot * repairRewardPercentage.u256).div(100.u256)
  success(collateralPerSlot - discount)

View File

@ -1,198 +0,0 @@
import pkg/ethers
import pkg/ethers/erc20
import pkg/json_rpc/rpcclient
import pkg/stint
import pkg/chronos
import ../clock
import ./requests
import ./proofs
import ./config
export stint
export ethers except `%`, `%*`, toJson
export erc20 except `%`, `%*`, toJson
export config
export requests
type
Marketplace* = ref object of Contract
Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
Marketplace_SlashPercentageTooHigh* = object of SolidityError
Marketplace_MaximumSlashingTooHigh* = object of SolidityError
Marketplace_InvalidExpiry* = object of SolidityError
Marketplace_InvalidMaxSlotLoss* = object of SolidityError
Marketplace_InsufficientSlots* = object of SolidityError
Marketplace_InvalidClientAddress* = object of SolidityError
Marketplace_RequestAlreadyExists* = object of SolidityError
Marketplace_InvalidSlot* = object of SolidityError
Marketplace_SlotNotFree* = object of SolidityError
Marketplace_InvalidSlotHost* = object of SolidityError
Marketplace_AlreadyPaid* = object of SolidityError
Marketplace_TransferFailed* = object of SolidityError
Marketplace_UnknownRequest* = object of SolidityError
Marketplace_InvalidState* = object of SolidityError
Marketplace_StartNotBeforeExpiry* = object of SolidityError
Marketplace_SlotNotAcceptingProofs* = object of SolidityError
Marketplace_SlotIsFree* = object of SolidityError
Marketplace_ReservationRequired* = object of SolidityError
Marketplace_NothingToWithdraw* = object of SolidityError
Marketplace_InsufficientDuration* = object of SolidityError
Marketplace_InsufficientProofProbability* = object of SolidityError
Marketplace_InsufficientCollateral* = object of SolidityError
Marketplace_InsufficientReward* = object of SolidityError
Marketplace_InvalidCid* = object of SolidityError
Marketplace_DurationExceedsLimit* = object of SolidityError
Proofs_InsufficientBlockHeight* = object of SolidityError
Proofs_InvalidProof* = object of SolidityError
Proofs_ProofAlreadySubmitted* = object of SolidityError
Proofs_PeriodNotEnded* = object of SolidityError
Proofs_ValidationTimedOut* = object of SolidityError
Proofs_ProofNotMissing* = object of SolidityError
Proofs_ProofNotRequired* = object of SolidityError
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
proc currentCollateral*(
marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}
proc requestStorage*(
marketplace: Marketplace, request: StorageRequest
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
Marketplace_InsufficientReward, Marketplace_InvalidCid,
]
.}
proc fillSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc freeSlot*(
marketplace: Marketplace, id: SlotId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc freeSlot*(
marketplace: Marketplace,
id: SlotId,
rewardRecipient: Address,
collateralRecipient: Address,
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc getRequest*(
marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}
proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(
marketplace: Marketplace, id: SlotId
): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}
proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(
marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}
proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc requestExpiry*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(
marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}
proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
proc submitProof*(
marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.
contract,
errors:
[Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
.}
proc markProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
]
.}
proc canMarkProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
Proofs_ProofAlreadyMarkedMissing,
]
.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}
proc canReserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}

View File

@ -1,46 +0,0 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/contracts/fields
type
Groth16Proof* = object
a*: G1Point
b*: G2Point
c*: G1Point
G1Point* = object
x*: UInt256
y*: UInt256
# A field element F_{p^2} encoded as `real + i * imag`
Fp2Element* = object
real*: UInt256
imag*: UInt256
G2Point* = object
x*: Fp2Element
y*: Fp2Element
func solidityType*(_: type G1Point): string =
solidityType(G1Point.fieldTypes)
func solidityType*(_: type Fp2Element): string =
solidityType(Fp2Element.fieldTypes)
func solidityType*(_: type G2Point): string =
solidityType(G2Point.fieldTypes)
func solidityType*(_: type Groth16Proof): string =
solidityType(Groth16Proof.fieldTypes)
func encode*(encoder: var AbiEncoder, point: G1Point) =
encoder.write(point.fieldValues)
func encode*(encoder: var AbiEncoder, element: Fp2Element) =
encoder.write(element.fieldValues)
func encode*(encoder: var AbiEncoder, point: G2Point) =
encoder.write(point.fieldValues)
func encode*(encoder: var AbiEncoder, proof: Groth16Proof) =
encoder.write(proof.fieldValues)

View File

@ -1,123 +0,0 @@
import pkg/ethers/provider
import pkg/chronos
import pkg/questionable
import ../logutils
from ../clock import SecondsSince1970
logScope:
topics = "marketplace onchain provider"
proc raiseProviderError(message: string) {.raises: [ProviderError].} =
  ## Helper: raises a ProviderError carrying `message`.
  raise newException(ProviderError, message)
proc blockNumberAndTimestamp*(
    provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
  ## Returns the (block number, timestamp) pair for the block identified
  ## by `blockTag`. Raises ProviderError when the block or its number
  ## cannot be retrieved from the provider.
  without latestBlock =? await provider.getBlock(blockTag):
    raiseProviderError("Could not get latest block")

  without latestBlockNumber =? latestBlock.number:
    raiseProviderError("Could not get latest block number")

  return (latestBlockNumber, latestBlock.timestamp)
proc binarySearchFindClosestBlock(
    provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  ## Of the two candidate blocks `low` and `high`, returns the one whose
  ## timestamp is closest to `epochTime`. Used as the tie-breaker after a
  ## binary search has narrowed the range to two neighbours.
  let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
  let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
  if abs(lowTimestamp.truncate(int) - epochTime) <
      abs(highTimestamp.truncate(int) - epochTime):
    return low
  else:
    return high
proc binarySearchBlockNumberForEpoch(
    provider: Provider,
    epochTime: UInt256,
    latestBlockNumber: UInt256,
    earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  ## Binary-searches block numbers in [earliestBlockNumber,
  ## latestBlockNumber] for a block whose timestamp equals `epochTime`.
  ## If no exact match exists, returns whichever of the two neighbouring
  ## blocks is closest in time.
  var low = earliestBlockNumber
  var high = latestBlockNumber

  while low <= high:
    if low == 0 and high == 0:
      return low
    let mid = (low + high) div 2
    let (midBlockNumber, midBlockTimestamp) =
      await provider.blockNumberAndTimestamp(BlockTag.init(mid))

    if midBlockTimestamp < epochTime:
      low = mid + 1
    elif midBlockTimestamp > epochTime:
      high = mid - 1
    else:
      return midBlockNumber
  # NOTICE that by how the binary search is implemented, when it finishes
  # low is always greater than high - this is why we use high, where
  # intuitively we would use low:
  await provider.binarySearchFindClosestBlock(
    epochTime.truncate(int), low = high, high = low
  )
proc blockNumberForEpoch*(
    provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  ## Maps an epoch time to the number of the block whose timestamp is
  ## closest to it, using the earliest/latest available blocks as search
  ## bounds. Warns and returns the earliest block when the requested time
  ## predates the available history.
  let epochTimeUInt256 = epochTime.u256
  let (latestBlockNumber, latestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.latest)
  let (earliestBlockNumber, earliestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.earliest)

  # Initially we used the average block time to predict
  # the number of blocks we need to look back in order to find
  # the block number corresponding to the given epoch time.
  # This estimation can be highly inaccurate if block time
  # was changing in the past or is fluctuating and therefore
  # we used that information initially only to find out
  # if the available history is long enough to perform effective search.
  # It turns out we do not have to do that. There is an easier way.
  #
  # First we check if the given epoch time equals the timestamp of either
  # the earliest or the latest block. If it does, we just return the
  # block number of that block.
  #
  # Otherwise, if the earliest available block is not the genesis block,
  # we should check the timestamp of that earliest block and if it is greater
  # than the epoch time, we should issue a warning and return
  # that earliest block number.
  # In all other cases, thus when the earliest block is not the genesis
  # block but its timestamp is not greater than the requested epoch time, or
  # if the earliest available block is the genesis block,
  # (which means we have the whole history available), we should proceed with
  # the binary search.
  #
  # Additional benefit of this method is that we do not have to rely
  # on the average block time, which not only makes the whole thing
  # more reliable, but also easier to test.

  # Are we lucky today?
  if earliestBlockTimestamp == epochTimeUInt256:
    return earliestBlockNumber
  if latestBlockTimestamp == epochTimeUInt256:
    return latestBlockNumber

  if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
    let availableHistoryInDays =
      (latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
    warn "Short block history detected.",
      earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
    return earliestBlockNumber

  return await provider.binarySearchBlockNumberForEpoch(
    epochTimeUInt256, latestBlockNumber, earliestBlockNumber
  )
proc pastBlockTag*(
    provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
  ## Returns a BlockTag `blocksAgo` blocks behind the current chain head.
  ## `abs` is applied, so negative inputs behave like their magnitude.
  let head = await provider.getBlockNumber()
  return BlockTag.init(head - blocksAgo.abs.u256)

View File

@ -1,206 +0,0 @@
import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
from ../errors import mapFailure
export contractabi
type
StorageRequest* = object
client* {.serialize.}: Address
ask* {.serialize.}: StorageAsk
content* {.serialize.}: StorageContent
expiry* {.serialize.}: uint64
nonce*: Nonce
StorageAsk* = object
proofProbability* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
slots* {.serialize.}: uint64
slotSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
maxSlotLoss* {.serialize.}: uint64
StorageContent* = object
cid* {.serialize.}: Cid
merkleRoot*: array[32, byte]
Slot* = object
request* {.serialize.}: StorageRequest
slotIndex* {.serialize.}: uint64
SlotId* = distinct array[32, byte]
RequestId* = distinct array[32, byte]
Nonce* = distinct array[32, byte]
RequestState* {.pure.} = enum
New
Started
Cancelled
Finished
Failed
SlotState* {.pure.} = enum
Free
Filled
Finished
Failed
Paid
Cancelled
Repair
proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}
proc `==`*(x, y: SlotId): bool {.borrow.}
proc hash*(x: SlotId): Hash {.borrow.}
proc hash*(x: Nonce): Hash {.borrow.}
proc hash*(x: Address): Hash {.borrow.}
func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
array[32, byte](id)
proc `$`*(id: RequestId | SlotId | Nonce): string =
id.toArray.toHex
proc fromHex*(T: type RequestId, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*(T: type SlotId, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*(T: type Nonce, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*[T: distinct](_: type T, hex: string): T =
type baseType = T.distinctBase
T baseType.fromHex(hex)
proc toHex*[T: distinct](id: T): string =
type baseType = T.distinctBase
baseType(id).toHex
logutils.formatIt(LogFormat.textLines, Nonce):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
it.to0xHexLog
func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
StorageRequest(
client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
)
func fromTuple(_: type Slot, tupl: tuple): Slot =
Slot(request: tupl[0], slotIndex: tupl[1])
func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
StorageAsk(
proofProbability: tupl[0],
pricePerBytePerSecond: tupl[1],
collateralPerByte: tupl[2],
slots: tupl[3],
slotSize: tupl[4],
duration: tupl[5],
maxSlotLoss: tupl[6],
)
func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
StorageContent(cid: tupl[0], merkleRoot: tupl[1])
func solidityType*(_: type Cid): string =
solidityType(seq[byte])
func solidityType*(_: type StorageContent): string =
solidityType(StorageContent.fieldTypes)
func solidityType*(_: type StorageAsk): string =
solidityType(StorageAsk.fieldTypes)
func solidityType*(_: type StorageRequest): string =
solidityType(StorageRequest.fieldTypes)
# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
encoder.write(cid.data.buffer)
func encode*(encoder: var AbiEncoder, content: StorageContent) =
encoder.write(content.fieldValues)
func encode*(encoder: var AbiEncoder, ask: StorageAsk) =
encoder.write(ask.fieldValues)
func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
encoder.write(id.toArray)
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
encoder.write(request.fieldValues)
func encode*(encoder: var AbiEncoder, slot: Slot) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
let data = ?decoder.read(seq[byte])
Cid.init(data).mapFailure
func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
let tupl = ?decoder.read(StorageContent.fieldTypes)
success StorageContent.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type StorageAsk): ?!T =
let tupl = ?decoder.read(StorageAsk.fieldTypes)
success StorageAsk.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T =
let tupl = ?decoder.read(StorageRequest.fieldTypes)
success StorageRequest.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
let tupl = ?decoder.read(Slot.fieldTypes)
success Slot.fromTuple(tupl)
func id*(request: StorageRequest): RequestId =
  ## Request id: keccak256 of the ABI-encoded request (wrapped in a 1-tuple).
  let encoding = AbiEncoder.encode((request,))
  RequestId(keccak256.digest(encoding).data)

func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
  ## Slot id: keccak256 of the ABI-encoded (requestId, slotIndex) pair.
  let encoding = AbiEncoder.encode((requestId, slotIndex))
  SlotId(keccak256.digest(encoding).data)

func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
  ## Slot id derived from the full request (via its computed id).
  slotId(request.id, slotIndex)

func id*(slot: Slot): SlotId =
  ## Id of a filled slot, derived from its request and index.
  slotId(slot.request, slot.slotIndex)

func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
  ## Price of one slot per second: per-byte price times slot size.
  ask.pricePerBytePerSecond * ask.slotSize.u256

func pricePerSlot*(ask: StorageAsk): UInt256 =
  ## Total price of one slot over the full request duration.
  ask.duration.u256 * ask.pricePerSlotPerSecond

func totalPrice*(ask: StorageAsk): UInt256 =
  ## Total price of the request: per-slot price times number of slots.
  ask.slots.u256 * ask.pricePerSlot

func totalPrice*(request: StorageRequest): UInt256 =
  ## Total price of the request (delegates to the ask).
  request.ask.totalPrice

func collateralPerSlot*(ask: StorageAsk): UInt256 =
  ## Collateral required per slot: per-byte collateral times slot size.
  ask.collateralPerByte * ask.slotSize.u256

func size*(ask: StorageAsk): uint64 =
  ## Total requested size in bytes: slots times slot size.
  ask.slots * ask.slotSize

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,13 +10,13 @@
{.push raises: [].} {.push raises: [].}
import std/algorithm import std/algorithm
import std/net
import std/sequtils import std/sequtils
import pkg/chronos import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope] import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/shims/net
import pkg/contractabi/address as ca import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5] import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256 from pkg/nimcrypto import keccak256
@ -43,6 +43,8 @@ type Discovery* = ref object of RootObj
# record to advertice node connection information, this carry any # record to advertice node connection information, this carry any
# address that the node can be connected on # address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information
isStarted: bool
store: Datastore
proc toNodeId*(cid: Cid): NodeId = proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id ## Cid to discovery id
@ -157,7 +159,7 @@ method provide*(
method removeProvider*( method removeProvider*(
d: Discovery, peerId: PeerId d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} = ): Future[void] {.base, async: (raises: [CancelledError]).} =
## Remove provider from providers table ## Remove provider from providers table
## ##
@ -203,15 +205,26 @@ proc start*(d: Discovery) {.async: (raises: []).} =
try: try:
d.protocol.open() d.protocol.open()
await d.protocol.start() await d.protocol.start()
d.isStarted = true
except CatchableError as exc: except CatchableError as exc:
error "Error starting discovery", exc = exc.msg error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async: (raises: []).} = proc stop*(d: Discovery) {.async: (raises: []).} =
if not d.isStarted:
warn "Discovery not started, skipping stop"
return
try: try:
await noCancel d.protocol.closeWait() await noCancel d.protocol.closeWait()
d.isStarted = false
except CatchableError as exc: except CatchableError as exc:
error "Error stopping discovery", exc = exc.msg error "Error stopping discovery", exc = exc.msg
proc close*(d: Discovery) {.async: (raises: []).} =
let res = await noCancel d.store.close()
if res.isErr:
error "Error closing discovery store", error = res.error().msg
proc new*( proc new*(
T: type Discovery, T: type Discovery,
key: PrivateKey, key: PrivateKey,
@ -224,8 +237,9 @@ proc new*(
## Create a new Discovery node instance for the given key and datastore ## Create a new Discovery node instance for the given key and datastore
## ##
var self = var self = Discovery(
Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId")) key: key, peerId: PeerId.init(key).expect("Should construct PeerId"), store: store
)
self.updateAnnounceRecord(announceAddrs) self.updateAnnounceRecord(announceAddrs)

View File

@ -1,25 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import ./erasure/erasure
import ./erasure/backends/leopard
export erasure
func leoEncoderProvider*(
size, buffers, parity: int
): EncoderBackend {.raises: [Defect].} =
## create new Leo Encoder
LeoEncoderBackend.new(size, buffers, parity)
func leoDecoderProvider*(
size, buffers, parity: int
): DecoderBackend {.raises: [Defect].} =
## create new Leo Decoder
LeoDecoderBackend.new(size, buffers, parity)

View File

@ -1,47 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
import ../stores
type
ErasureBackend* = ref object of RootObj
blockSize*: int # block size in bytes
buffers*: int # number of original pieces
parity*: int # number of redundancy pieces
EncoderBackend* = ref object of ErasureBackend
DecoderBackend* = ref object of ErasureBackend
method release*(self: ErasureBackend) {.base, gcsafe.} =
## release the backend
##
raiseAssert("not implemented!")
method encode*(
self: EncoderBackend,
buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## encode buffers using a backend
##
raiseAssert("not implemented!")
method decode*(
self: DecoderBackend,
buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## decode buffers using a backend
##
raiseAssert("not implemented!")

View File

@ -1,79 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/options
import pkg/leopard
import pkg/results
import ../backend
type
LeoEncoderBackend* = ref object of EncoderBackend
encoder*: Option[LeoEncoder]
LeoDecoderBackend* = ref object of DecoderBackend
decoder*: Option[LeoDecoder]
method encode*(
self: LeoEncoderBackend,
data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] =
## Encode data using Leopard backend
if parityLen == 0:
return ok()
var encoder =
if self.encoder.isNone:
self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
self.encoder.get()
else:
self.encoder.get()
encoder.encode(data, parity, dataLen, parityLen)
method decode*(
self: LeoDecoderBackend,
data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
## Decode data using given Leopard backend
var decoder =
if self.decoder.isNone:
self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
self.decoder.get()
else:
self.decoder.get()
decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)
method release*(self: LeoEncoderBackend) =
if self.encoder.isSome:
self.encoder.get().free()
method release*(self: LeoDecoderBackend) =
if self.decoder.isSome:
self.decoder.get().free()
proc new*(
T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
## Create an instance of an Leopard Encoder backend
##
LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
proc new*(
T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
## Create an instance of an Leopard Decoder backend
##
LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)

View File

@ -1,678 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
import std/[sugar, atomics, sequtils]
import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools
import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils
import pkg/stew/byteutils
import ./backend
export backend
logScope:
topics = "codex erasure"
type
## Encode a manifest into one that is erasure protected.
##
## The new manifest has K `blocks` that are encoded into
## additional M `parity` blocks. The resulting dataset
## is padded with empty blocks if it doesn't have a square
## shape.
##
## NOTE: The padding blocks could be excluded
## from transmission, but they aren't for now.
##
## The resulting dataset is logically divided into rows
## where a row is made up of B blocks. There are then,
## K + M = N rows in total, each of length B blocks. Rows
## are assumed to be of the same number of (B) blocks.
##
## The encoding is systematic and the rows can be
## read sequentially by any node without decoding.
##
## Decoding is possible with any K rows or partial K
## columns (with up to M blocks missing per column),
## or any combination there of.
##
EncoderProvider* =
proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}
DecoderProvider* =
proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}
Erasure* = ref object
taskPool: Taskpool
encoderProvider*: EncoderProvider
decoderProvider*: DecoderProvider
store*: BlockStore
EncodingParams = object
ecK: Natural
ecM: Natural
rounded: Natural
steps: Natural
blocksCount: Natural
strategy: StrategyType
ErasureError* = object of CodexError
InsufficientBlocksError* = object of ErasureError
# Minimum size, in bytes, that the dataset must have had
# for the encoding request to have succeeded with the parameters
# provided.
minSize*: NBytes
EncodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen, parityLen: int
signal: ThreadSignalPtr
DecodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen: int
parityLen, recoveredLen: int
signal: ThreadSignalPtr
func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
## `idx` - the index to convert
## `step` - the current step
## `pos` - the position in the encoded dataset
##
(idx - step) div steps
proc getPendingBlocks(
self: Erasure, manifest: Manifest, indicies: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var
# request blocks from the store
pendingBlocks = indicies.map(
(i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
(r: ?!bt.Block) => (r, i)
) # Get the data blocks (first K)
)
proc isFinished(): bool =
pendingBlocks.len == 0
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
let completedFut = await one(pendingBlocks)
if (let i = pendingBlocks.find(completedFut); i >= 0):
pendingBlocks.del(i)
return await completedFut
else:
let (_, index) = await completedFut
raise newException(
CatchableError,
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
$index,
)
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
proc prepareEncodingData(
self: Erasure,
manifest: Manifest,
params: EncodingParams,
step: Natural,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
## Prepare data for encoding
##
let
strategy = params.strategy.init(
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter =
self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
cids[idx] = blk.cid
resolved.inc()
for idx in indicies.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
err:
return failure(err)
cids[idx] = emptyBlockCid
success(resolved.Natural)
proc prepareDecodingData(
self: Erasure,
encoded: Manifest,
step: Natural,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
## `data` - the data to be prepared
## `parityData` - the parityData to be prepared
## `cids` - cids of prepared data
## `emptyBlock` - the empty block to be used for padding
##
let
strategy = encoded.protectedStrategy.init(
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
var
dataPieces = 0
parityPieces = 0
resolved = 0
for fut in pendingBlocksIter:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if resolved >= encoded.ecK:
break
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let pos = indexToPos(encoded.steps, idx, step)
logScope:
cid = blk.cid
idx = idx
pos = pos
step = step
empty = blk.isEmpty
cids[idx] = blk.cid
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(
parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
)
parityPieces.inc
else:
trace "Retrieved data block"
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
dataPieces.inc
resolved.inc
return success (dataPieces.Natural, parityPieces.Natural)
proc init*(
_: type EncodingParams,
manifest: Manifest,
ecK: Natural,
ecM: Natural,
strategy: StrategyType,
): ?!EncodingParams =
if ecK > manifest.blocksCount:
let exc = (ref InsufficientBlocksError)(
msg:
"Unable to encode manifest, not enough blocks, ecK = " & $ecK &
", blocksCount = " & $manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize,
)
return failure(exc)
let
rounded = roundUp(manifest.blocksCount, ecK)
steps = divUp(rounded, ecK)
blocksCount = rounded + (steps * ecM)
success EncodingParams(
ecK: ecK,
ecM: ecM,
rounded: rounded,
steps: steps,
blocksCount: blocksCount,
strategy: strategy,
)
proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let encoder =
task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
encoder.release()
discard task[].signal.fireSync()
if (
let res =
encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
res.isErr
):
warn "Error from leopard encoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncEncode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var data = makeUncheckedArray(blocks)
defer:
dealloc(data)
## Create an ecode task with block data
var task = EncodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: data,
parity: parity,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding failed")
success()
proc encodeData(
self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
logScope:
steps = params.steps
rounded_blocks = params.rounded
blocks_count = params.blocksCount
ecK = params.ecK
ecM = params.ecM
var
cids = seq[Cid].new()
emptyBlock = newSeq[byte](manifest.blockSize.int)
cids[].setLen(params.blocksCount)
try:
for step in 0 ..< params.steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
err:
trace "Unable to prepare data", error = err.msg
return failure(err)
trace "Erasure coding data", data = data[].len
try:
if err =? (
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(parity, params.ecM)
var idx = params.rounded + step
for j in 0 ..< params.ecM:
var innerPtr: ptr UncheckedArray[byte] = parity[][j]
without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
error:
trace "Unable to create parity block", err = error.msg
return failure(error)
trace "Adding parity block", cid = blk.cid, idx
cids[idx] = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
idx.inc(params.steps)
without tree =? CodexTree.init(cids[]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
let encodedManifest = Manifest.new(
manifest = manifest,
treeCid = treeCid,
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
ecK = params.ecK,
ecM = params.ecM,
strategy = params.strategy,
)
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
success encodedManifest
except CancelledError as exc:
trace "Erasure coding encoding cancelled"
raise exc # cancellation needs to be propagated
except CatchableError as exc:
trace "Erasure coding encoding error", exc = exc.msg
return failure(exc)
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: Natural,
parity: Natural,
strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
## `manifest` - the original manifest to be encoded
## `blocks` - the number of blocks to be encoded - K
## `parity` - the number of parity blocks to generate - M
##
without params =? EncodingParams.init(manifest, blocks.int, parity.int, strategy), err:
return failure(err)
without encodedManifest =? await self.encodeData(manifest, params), err:
return failure(err)
return success encodedManifest
proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let decoder =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()
if (
let res = decoder.decode(
task[].blocks,
task[].parity,
task[].recovered,
task[].blocksLen,
task[].parityLen,
task[].recoveredLen,
)
res.isErr
):
warn "Error from leopard decoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)
defer:
dealloc(blockData)
dealloc(parityData)
## Create an decode task with block data
var task = DecodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding failed")
success()
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into it's original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.blocksCount
var
cids = seq[Cid].new()
recoveredIndices = newSeq[Natural]()
decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
cids[].setLen(encoded.blocksCount)
try:
for step in 0 ..< encoded.steps:
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
var
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
without (dataPieces, _) =? (
await self.prepareDecodingData(
encoded, step, data, parityData, cids, emptyBlock
)
), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
if dataPieces >= encoded.ecK:
trace "Retrieved all the required data blocks"
continue
trace "Erasure decoding data"
try:
if err =? (
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(recovered, encoded.ecK)
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
var innerPtr: ptr UncheckedArray[byte] = recovered[][i]
without blk =? bt.Block.new(
innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
), error:
trace "Unable to create block!", exc = error.msg
return failure(error)
trace "Recovered block", cid = blk.cid, index = i
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
trace "Erasure coding decoding cancelled"
raise exc # cancellation needs to be propagated
except CatchableError as exc:
trace "Erasure coding decoding error", exc = exc.msg
return failure(exc)
finally:
decoder.release()
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
let idxIter =
Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
return failure(err)
let decoded = Manifest.new(encoded)
return decoded.success
proc start*(self: Erasure) {.async.} =
return
proc stop*(self: Erasure) {.async.} =
return
proc new*(
T: type Erasure,
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider,
taskPool: Taskpool,
): Erasure =
## Create a new Erasure instance for encoding and decoding manifests
##
Erasure(
store: store,
encoderProvider: encoderProvider,
decoderProvider: decoderProvider,
taskPool: taskPool,
)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,100 +0,0 @@
import ./errors
import ./utils
import ./utils/asynciter
{.push raises: [].}
type
StrategyType* = enum
# Simplest approach:
# 0 => 0, 1, 2
# 1 => 3, 4, 5
# 2 => 6, 7, 8
LinearStrategy
# Stepped indexing:
# 0 => 0, 3, 6
# 1 => 1, 4, 7
# 2 => 2, 5, 8
SteppedStrategy
# Representing a strategy for grouping indices (of blocks usually)
# Given an interation-count as input, will produce a seq of
# selected indices.
IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError
IndexingStrategy* = object
strategyType*: StrategyType
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
step*: int
func checkIteration(
self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
if iteration >= self.iterations:
raise newException(
IndexingError, "Indexing iteration can't be greater than or equal to iterations."
)
func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}:
Iter[int].new(first, last, step)
func getLinearIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let
first = self.firstIndex + iteration * self.step
last = min(first + self.step - 1, self.lastIndex)
getIter(first, last, 1)
func getSteppedIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let
first = self.firstIndex + iteration
last = self.lastIndex
getIter(first, last, self.iterations)
func getIndicies*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
case self.strategyType
of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration)
of StrategyType.SteppedStrategy:
self.getSteppedIndicies(iteration)
func init*(
strategy: StrategyType, firstIndex, lastIndex, iterations: int
): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex:
raise newException(
IndexingWrongIndexError,
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
")",
)
if iterations <= 0:
raise newException(
IndexingWrongIterationsError,
"iterations (" & $iterations & ") must be greater than zero.",
)
IndexingStrategy(
strategyType: strategy,
firstIndex: firstIndex,
lastIndex: lastIndex,
iterations: iterations,
step: divUp((lastIndex - firstIndex + 1), iterations),
)

View File

@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase ## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types ## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent ## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467) ## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
## ##
## When declaring a new type, one should consider importing the `codex/logutils` ## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output ## module, and specifying `formatIt`. If textlines log output and json log output
@ -92,6 +92,7 @@ import std/sugar
import std/typetraits import std/typetraits
import pkg/chronicles except toJson, `%` import pkg/chronicles except toJson, `%`
from pkg/chronos import TransportAddress
from pkg/libp2p import Cid, MultiAddress, `$` from pkg/libp2p import Cid, MultiAddress, `$`
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -255,3 +256,5 @@ formatIt(LogFormat.textLines, array[32, byte]):
it.short0xHexLog it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]): formatIt(LogFormat.json, array[32, byte]):
it.to0xHex it.to0xHex
formatIt(TransportAddress):
$it

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,11 +9,9 @@
# This module implements serialization and deserialization of Manifest # This module implements serialization and deserialization of Manifest
import pkg/upraises
import times import times
push: {.push raises: [].}
{.upraises: [].}
import std/tables import std/tables
import std/sequtils import std/sequtils
@ -27,32 +25,18 @@ import ./manifest
import ../errors import ../errors
import ../blocktype import ../blocktype
import ../logutils import ../logutils
import ../indexingstrategy
proc encode*(manifest: Manifest): ?!seq[byte] = proc encode*(manifest: Manifest): ?!seq[byte] =
## Encode the manifest into a ``ManifestCodec`` ## Encode the manifest into a ``ManifestCodec``
## multicodec container (Dag-pb) for now ## multicodec container (Dag-pb) for now
## ##
?manifest.verify()
var pbNode = initProtoBuffer() var pbNode = initProtoBuffer()
# NOTE: The `Data` field in the the `dag-pb` # NOTE: The `Data` field in the the `dag-pb`
# contains the following protobuf `Message` # contains the following protobuf `Message`
# #
# ```protobuf # ```protobuf
# Message VerificationInfo {
# bytes verifyRoot = 1; # Decimal encoded field-element
# repeated bytes slotRoots = 2; # Decimal encoded field-elements
# }
# Message ErasureInfo {
# optional uint32 ecK = 1; # number of encoded blocks
# optional uint32 ecM = 2; # number of parity blocks
# optional bytes originalTreeCid = 3; # cid of the original dataset
# optional uint32 originalDatasetSize = 4; # size of the original dataset
# optional VerificationInformation verification = 5; # verification information
# }
#
# Message Header { # Message Header {
# optional bytes treeCid = 1; # cid (root) of the tree # optional bytes treeCid = 1; # cid (root) of the tree
# optional uint32 blockSize = 2; # size of a single block # optional uint32 blockSize = 2; # size of a single block
@ -60,9 +44,8 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
# optional codec: MultiCodec = 4; # Dataset codec # optional codec: MultiCodec = 4; # Dataset codec
# optional hcodec: MultiCodec = 5 # Multihash codec # optional hcodec: MultiCodec = 5 # Multihash codec
# optional version: CidVersion = 6; # Cid version # optional version: CidVersion = 6; # Cid version
# optional ErasureInfo erasure = 7; # erasure coding info # optional filename: ?string = 7; # original filename
# optional filename: ?string = 8; # original filename # optional mimetype: ?string = 8; # original mimetype
# optional mimetype: ?string = 9; # original mimetype
# } # }
# ``` # ```
# #
@ -75,31 +58,11 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
header.write(5, manifest.hcodec.uint32) header.write(5, manifest.hcodec.uint32)
header.write(6, manifest.version.uint32) header.write(6, manifest.version.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
erasureInfo.write(2, manifest.ecM.uint32)
erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
erasureInfo.write(4, manifest.originalDatasetSize.uint64)
erasureInfo.write(5, manifest.protectedStrategy.uint32)
if manifest.verifiable:
var verificationInfo = initProtoBuffer()
verificationInfo.write(1, manifest.verifyRoot.data.buffer)
for slotRoot in manifest.slotRoots:
verificationInfo.write(2, slotRoot.data.buffer)
verificationInfo.write(3, manifest.cellSize.uint32)
verificationInfo.write(4, manifest.verifiableStrategy.uint32)
erasureInfo.write(6, verificationInfo)
erasureInfo.finish()
header.write(7, erasureInfo)
if manifest.filename.isSome: if manifest.filename.isSome:
header.write(8, manifest.filename.get()) header.write(7, manifest.filename.get())
if manifest.mimetype.isSome: if manifest.mimetype.isSome:
header.write(9, manifest.mimetype.get()) header.write(8, manifest.mimetype.get())
pbNode.write(1, header) # set the treeCid as the data field pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish() pbNode.finish()
@ -113,22 +76,12 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
var var
pbNode = initProtoBuffer(data) pbNode = initProtoBuffer(data)
pbHeader: ProtoBuffer pbHeader: ProtoBuffer
pbErasureInfo: ProtoBuffer
pbVerificationInfo: ProtoBuffer
treeCidBuf: seq[byte] treeCidBuf: seq[byte]
originalTreeCid: seq[byte]
datasetSize: uint64 datasetSize: uint64
codec: uint32 codec: uint32
hcodec: uint32 hcodec: uint32
version: uint32 version: uint32
blockSize: uint32 blockSize: uint32
originalDatasetSize: uint64
ecK, ecM: uint32
protectedStrategy: uint32
verifyRoot: seq[byte]
slotRoots: seq[seq[byte]]
cellSize: uint32
verifiableStrategy: uint32
filename: string filename: string
mimetype: string mimetype: string
@ -155,98 +108,27 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
if pbHeader.getField(6, version).isErr: if pbHeader.getField(6, version).isErr:
return failure("Unable to decode `version` from manifest!") return failure("Unable to decode `version` from manifest!")
if pbHeader.getField(7, pbErasureInfo).isErr: if pbHeader.getField(7, filename).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
if pbHeader.getField(8, filename).isErr:
return failure("Unable to decode `filename` from manifest!") return failure("Unable to decode `filename` from manifest!")
if pbHeader.getField(9, mimetype).isErr: if pbHeader.getField(8, mimetype).isErr:
return failure("Unable to decode `mimetype` from manifest!") return failure("Unable to decode `mimetype` from manifest!")
let protected = pbErasureInfo.buffer.len > 0
var verifiable = false
if protected:
if pbErasureInfo.getField(1, ecK).isErr:
return failure("Unable to decode `K` from manifest!")
if pbErasureInfo.getField(2, ecM).isErr:
return failure("Unable to decode `M` from manifest!")
if pbErasureInfo.getField(3, originalTreeCid).isErr:
return failure("Unable to decode `originalTreeCid` from manifest!")
if pbErasureInfo.getField(4, originalDatasetSize).isErr:
return failure("Unable to decode `originalDatasetSize` from manifest!")
if pbErasureInfo.getField(5, protectedStrategy).isErr:
return failure("Unable to decode `protectedStrategy` from manifest!")
if pbErasureInfo.getField(6, pbVerificationInfo).isErr:
return failure("Unable to decode `verificationInfo` from manifest!")
verifiable = pbVerificationInfo.buffer.len > 0
if verifiable:
if pbVerificationInfo.getField(1, verifyRoot).isErr:
return failure("Unable to decode `verifyRoot` from manifest!")
if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr:
return failure("Unable to decode `slotRoots` from manifest!")
if pbVerificationInfo.getField(3, cellSize).isErr:
return failure("Unable to decode `cellSize` from manifest!")
if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
return failure("Unable to decode `verifiableStrategy` from manifest!")
let treeCid = ?Cid.init(treeCidBuf).mapFailure let treeCid = ?Cid.init(treeCidBuf).mapFailure
var filenameOption = if filename.len == 0: string.none else: filename.some var filenameOption = if filename.len == 0: string.none else: filename.some
var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
let self = let self = Manifest.new(
if protected: treeCid = treeCid,
Manifest.new( datasetSize = datasetSize.NBytes,
treeCid = treeCid, blockSize = blockSize.NBytes,
datasetSize = datasetSize.NBytes, version = CidVersion(version),
blockSize = blockSize.NBytes, hcodec = hcodec.MultiCodec,
version = CidVersion(version), codec = codec.MultiCodec,
hcodec = hcodec.MultiCodec, filename = filenameOption,
codec = codec.MultiCodec, mimetype = mimetypeOption,
ecK = ecK.int, )
ecM = ecM.int,
originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes,
strategy = StrategyType(protectedStrategy),
filename = filenameOption,
mimetype = mimetypeOption,
)
else:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
)
?self.verify()
if verifiable:
let
verifyRootCid = ?Cid.init(verifyRoot).mapFailure
slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)
return Manifest.new(
manifest = self,
verifyRoot = verifyRootCid,
slotRoots = slotRootCids,
cellSize = cellSize.NBytes,
strategy = StrategyType(verifiableStrategy),
)
self.success self.success

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# This module defines all operations on Manifest # This module defines all operations on Manifest
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p/protobuf/minprotobuf import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec] import pkg/libp2p/[cid, multihash, multicodec]
@ -23,7 +20,6 @@ import ../utils
import ../utils/json import ../utils/json
import ../units import ../units
import ../blocktype import ../blocktype
import ../indexingstrategy
import ../logutils import ../logutils
# TODO: Manifest should be reworked to more concrete types, # TODO: Manifest should be reworked to more concrete types,
@ -38,24 +34,6 @@ type Manifest* = ref object of RootObj
version: CidVersion # Cid version version: CidVersion # Cid version
filename {.serialize.}: ?string # The filename of the content uploaded (optional) filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional) mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
case protected {.serialize.}: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
case verifiable {.serialize.}: bool
# Verifiable datasets can be used to generate storage proofs
of true:
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
cellSize: NBytes # Size of each slot cell
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
else:
discard
else:
discard
############################################################ ############################################################
# Accessors # Accessors
@ -76,54 +54,12 @@ func hcodec*(self: Manifest): MultiCodec =
func codec*(self: Manifest): MultiCodec = func codec*(self: Manifest): MultiCodec =
self.codec self.codec
func protected*(self: Manifest): bool =
self.protected
func ecK*(self: Manifest): int =
self.ecK
func ecM*(self: Manifest): int =
self.ecM
func originalTreeCid*(self: Manifest): Cid =
self.originalTreeCid
func originalBlocksCount*(self: Manifest): int =
divUp(self.originalDatasetSize.int, self.blockSize.int)
func originalDatasetSize*(self: Manifest): NBytes =
self.originalDatasetSize
func treeCid*(self: Manifest): Cid = func treeCid*(self: Manifest): Cid =
self.treeCid self.treeCid
func blocksCount*(self: Manifest): int = func blocksCount*(self: Manifest): int =
divUp(self.datasetSize.int, self.blockSize.int) divUp(self.datasetSize.int, self.blockSize.int)
func verifiable*(self: Manifest): bool =
bool (self.protected and self.verifiable)
func verifyRoot*(self: Manifest): Cid =
self.verifyRoot
func slotRoots*(self: Manifest): seq[Cid] =
self.slotRoots
func numSlots*(self: Manifest): int =
self.ecK + self.ecM
func cellSize*(self: Manifest): NBytes =
self.cellSize
func protectedStrategy*(self: Manifest): StrategyType =
self.protectedStrategy
func verifiableStrategy*(self: Manifest): StrategyType =
self.verifiableStrategy
func numSlotBlocks*(self: Manifest): int =
divUp(self.blocksCount, self.numSlots)
func filename*(self: Manifest): ?string = func filename*(self: Manifest): ?string =
self.filename self.filename
@ -144,51 +80,16 @@ func isManifest*(mc: MultiCodec): ?!bool =
# Various sizes and verification # Various sizes and verification
############################################################ ############################################################
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalBlocksCount, self.ecK)
func steps*(self: Manifest): int =
## Number of EC groups in *protected* manifest
divUp(self.rounded, self.ecK)
func verify*(self: Manifest): ?!void =
## Check manifest correctness
##
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return
failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
func `==`*(a, b: Manifest): bool = func `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
(a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and (a.codec == b.codec) and (a.filename == b.filename) and (a.mimetype == b.mimetype)
(a.mimetype == b.mimetype) and (
if a.protected:
(a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize) and
(a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
(
if a.verifiable:
(a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and (
a.verifiableStrategy == b.verifiableStrategy
)
else:
true
)
else:
true
)
func `$`*(self: Manifest): string = func `$`*(self: Manifest): string =
result = result =
"treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " & "treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
$self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec & $self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
", codec: " & $self.codec & ", protected: " & $self.protected ", codec: " & $self.codec
if self.filename.isSome: if self.filename.isSome:
result &= ", filename: " & $self.filename result &= ", filename: " & $self.filename
@ -196,20 +97,6 @@ func `$`*(self: Manifest): string =
if self.mimetype.isSome: if self.mimetype.isSome:
result &= ", mimetype: " & $self.mimetype result &= ", mimetype: " & $self.mimetype
result &= (
if self.protected:
", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
$self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable & (
if self.verifiable:
", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
else:
""
)
else:
""
)
return result return result
############################################################ ############################################################
@ -224,7 +111,6 @@ func new*(
version: CidVersion = CIDv1, version: CidVersion = CIDv1,
hcodec = Sha256HashCodec, hcodec = Sha256HashCodec,
codec = BlockCodec, codec = BlockCodec,
protected = false,
filename: ?string = string.none, filename: ?string = string.none,
mimetype: ?string = string.none, mimetype: ?string = string.none,
): Manifest = ): Manifest =
@ -235,132 +121,10 @@ func new*(
version: version, version: version,
codec: codec, codec: codec,
hcodec: hcodec, hcodec: hcodec,
protected: protected,
filename: filename, filename: filename,
mimetype: mimetype, mimetype: mimetype,
) )
func new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int,
strategy = SteppedStrategy,
): Manifest =
## Create an erasure protected dataset from an
## unprotected one
##
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK,
ecM: ecM,
originalTreeCid: manifest.treeCid,
originalDatasetSize: manifest.datasetSize,
protectedStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(T: type Manifest, manifest: Manifest): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
Manifest(
treeCid: manifest.originalTreeCid,
datasetSize: manifest.originalDatasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: false,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(
T: type Manifest,
treeCid: Cid,
datasetSize: NBytes,
blockSize: NBytes,
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes,
strategy = SteppedStrategy,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
blockSize: blockSize,
version: version,
hcodec: hcodec,
codec: codec,
protected: true,
ecK: ecK,
ecM: ecM,
originalTreeCid: originalTreeCid,
originalDatasetSize: originalDatasetSize,
protectedStrategy: strategy,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
manifest: Manifest,
verifyRoot: Cid,
slotRoots: openArray[Cid],
cellSize = DefaultCellSize,
strategy = LinearStrategy,
): ?!Manifest =
## Create a verifiable dataset from an
## protected one
##
if not manifest.protected:
return failure newException(
CodexError, "Can create verifiable manifest only from protected manifest."
)
if slotRoots.len != manifest.numSlots:
return failure newException(CodexError, "Wrong number of slot roots.")
success Manifest(
treeCid: manifest.treeCid,
datasetSize: manifest.datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: manifest.ecK,
ecM: manifest.ecM,
originalTreeCid: manifest.originalTreeCid,
originalDatasetSize: manifest.originalDatasetSize,
protectedStrategy: manifest.protectedStrategy,
verifiable: true,
verifyRoot: verifyRoot,
slotRoots: @slotRoots,
cellSize: cellSize,
verifiableStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest = func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
## Create a manifest instance from given data ## Create a manifest instance from given data
## ##

View File

@ -1,314 +0,0 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
import ./contracts/proofs
import ./clock
import ./errors
import ./periods
export chronos
export questionable
export requests
export proofs
export SecondsSince1970
export periods
type
Market* = ref object of RootObj
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
MarketplaceEvent* = Event
StorageRequested* = object of MarketplaceEvent
requestId*: RequestId
ask*: StorageAsk
expiry*: uint64
SlotFilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotFreed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotReservationsFull* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
RequestFulfilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestCancelled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestFailed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
ProofSubmitted* = object of MarketplaceEvent
id*: SlotId
method loadConfig*(
market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method getZkeyHash*(
market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getSigner*(
market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method periodicity*(
market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method proofTimeout*(
market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method repairRewardPercentage*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
raiseAssert("not implemented")
method proofDowntime*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
raiseAssert("not implemented")
proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let downtime = await market.proofDowntime
let pntr = await market.getPointer(slotId)
return pntr < downtime
method requestStorage*(
market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
raiseAssert("not implemented")
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
raiseAssert("not implemented")
method getRequest*(
market: Market, id: RequestId
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method requestState*(
market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")
method slotState*(
market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getRequestEnd*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method requestExpiresAt*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method getHost*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method currentCollateral*(
market: Market, slotId: SlotId
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
raiseAssert("not implemented")
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
raiseAssert("not implemented")
method fillSlot*(
market: Market,
requestId: RequestId,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method freeSlot*(
market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method withdrawFunds*(
market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method subscribeRequests*(
market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method getChallenge*(
market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")
method submitProof*(
market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method markProofAsMissing*(
market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canMarkProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method reserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canReserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(
market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(
market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(
market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(
market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFreed*(
market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotReservationsFull*(
market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(
market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(
market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(
market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(
market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeProofSubmission*(
market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
raiseAssert("not implemented")

View File

@ -1,10 +1,4 @@
import ./merkletree/merkletree import ./merkletree/merkletree
import ./merkletree/codex import ./merkletree/codex
import ./merkletree/poseidon2
export codex, poseidon2, merkletree export codex, merkletree
type
SomeMerkleTree* = ByteTree | CodexTree | Poseidon2Tree
SomeMerkleProof* = ByteProof | CodexProof | Poseidon2Proof
SomeMerkleHash* = ByteHash | Poseidon2Hash

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p import pkg/libp2p
import pkg/questionable import pkg/questionable

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,16 +10,18 @@
{.push raises: [].} {.push raises: [].}
import std/bitops import std/bitops
import std/sequtils import std/[atomics, sequtils]
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash] import pkg/libp2p/[cid, multicodec, multihash]
import pkg/constantine/hashes import pkg/constantine/hashes
import pkg/taskpools
import pkg/chronos/threadsync
import ../../utils import ../../utils
import ../../rng import ../../rng
import ../../errors import ../../errors
import ../../blocktype import ../../codextypes
from ../../utils/digest import digestBytes from ../../utils/digest import digestBytes
@ -47,28 +49,6 @@ type
CodexProof* = ref object of ByteProof CodexProof* = ref object of ByteProof
mcodec*: MultiCodec mcodec*: MultiCodec
# CodeHashes is not exported from libp2p
# So we need to recreate it instead of
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Number of leaves
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof = func getProof*(self: CodexTree, index: int): ?!CodexProof =
var proof = CodexProof(mcodec: self.mcodec) var proof = CodexProof(mcodec: self.mcodec)
@ -128,38 +108,47 @@ proc `$`*(self: CodexProof): string =
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " & "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )" $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash = func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
## Compress two hashes ## Compress two hashes
## ##
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/codex-storage/nim-codex/issues/1162
let input = @x & @y & @[key.byte] let input = @x & @y & @[key.byte]
var digest = hashes.sha256.hash(input) let digest = ?MultiHash.digest(codec, input).mapFailure
success digest.digestBytes
success @digest func initTree(mcodec: MultiCodec, leaves: openArray[ByteHash]): ?!CodexTree =
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
if leaves.len == 0: if leaves.len == 0:
return failure "Empty leaves" return failure "Empty leaves"
let let
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash) compress(x, y, key, mcodec)
Zero: ByteHash = newSeq[byte](mhash.size) digestSize = ?mcodec.digestSize.mapFailure
Zero: ByteHash = newSeq[byte](digestSize)
if mhash.size != leaves[0].len: if digestSize != leaves[0].len:
return failure "Invalid hash length" return failure "Invalid hash length"
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero) var self = CodexTree(mcodec: mcodec)
?self.prepare(compressor, Zero, leaves)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self success self
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
let tree = ?initTree(mcodec, leaves)
?tree.compute()
success tree
proc init*(
_: type CodexTree,
tp: Taskpool,
mcodec: MultiCodec = Sha256HashCodec,
leaves: seq[ByteHash],
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
let tree = ?initTree(mcodec, leaves)
?await tree.compute(tp)
success tree
func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree = func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
if leaves.len == 0: if leaves.len == 0:
return failure "Empty leaves" return failure "Empty leaves"
@ -170,6 +159,18 @@ func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
CodexTree.init(mcodec, leaves) CodexTree.init(mcodec, leaves)
proc init*(
_: type CodexTree, tp: Taskpool, leaves: seq[MultiHash]
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = leaves[0].mcodec
leaves = leaves.mapIt(it.digestBytes)
await CodexTree.init(tp, mcodec, leaves)
func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree = func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0: if leaves.len == 0:
return failure "Empty leaves" return failure "Empty leaves"
@ -180,6 +181,18 @@ func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
CodexTree.init(mcodec, leaves) CodexTree.init(mcodec, leaves)
proc init*(
_: type CodexTree, tp: Taskpool, leaves: seq[Cid]
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
if leaves.len == 0:
return failure("Empty leaves")
let
mcodec = (?leaves[0].mhash.mapFailure).mcodec
leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes)
await CodexTree.init(tp, mcodec, leaves)
proc fromNodes*( proc fromNodes*(
_: type CodexTree, _: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec, mcodec: MultiCodec = Sha256HashCodec,
@ -190,23 +203,16 @@ proc fromNodes*(
return failure "Empty nodes" return failure "Empty nodes"
let let
mhash = ?mcodec.mhash() digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](mhash.size) Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash) compress(x, y, key, mcodec)
if mhash.size != nodes[0].len: if digestSize != nodes[0].len:
return failure "Invalid hash length" return failure "Invalid hash length"
var var self = CodexTree(mcodec: mcodec)
self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec) ?self.fromNodes(compressor, Zero, nodes, nleaves)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let let
index = Rng.instance.rand(nleaves - 1) index = Rng.instance.rand(nleaves - 1)
@ -228,10 +234,10 @@ func init*(
return failure "Empty nodes" return failure "Empty nodes"
let let
mhash = ?mcodec.mhash() digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](mhash.size) Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} = compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash) compress(x, y, key, mcodec)
success CodexProof( success CodexProof(
compress: compressor, compress: compressor,

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,19 +9,58 @@
{.push raises: [].} {.push raises: [].}
import std/bitops import std/[bitops, atomics, sequtils]
import stew/assign2
import pkg/questionable/results import pkg/questionable/results
import pkg/taskpools
import pkg/chronos
import pkg/chronos/threadsync
import ../errors import ../errors
import ../utils/sharedbuf
export sharedbuf
template nodeData(
data: openArray[byte], offsets: openArray[int], nodeSize, i, j: int
): openArray[byte] =
## Bytes of the j'th entry of the i'th level in the tree, starting with the
## leaves (at level 0).
let start = (offsets[i] + j) * nodeSize
data.toOpenArray(start, start + nodeSize - 1)
type type
# TODO hash functions don't fail - removing the ?! from this function would
# significantly simplify the flow below
CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].} CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
MerkleTree*[H, K] = ref object of RootObj CompressData[H, K] = object
layers*: seq[seq[H]] fn: CompressFn[H, K]
compress*: CompressFn[H, K] nodeSize: int
zero*: H zero: H
MerkleTreeObj*[H, K] = object of RootObj
store*: seq[byte]
## Flattened merkle tree where hashes are assumed to be trivial bytes and
## uniform in size.
##
## Each layer of the tree is stored serially starting with the leaves and
## ending with the root.
##
## Beacuse the tree might not be balanced, `layerOffsets` contains the
## index of the starting point of each level, for easy lookup.
layerOffsets*: seq[int]
## Starting point of each level in the tree, starting from the leaves -
## multiplied by the entry size, this is the offset in the payload where
## the entries of that level start
##
## For example, a tree with 4 leaves will have [0, 4, 6] stored here.
##
## See nodesPerLevel function, from whic this sequence is derived
compress*: CompressData[H, K]
MerkleTree*[H, K] = ref MerkleTreeObj[H, K]
MerkleProof*[H, K] = ref object of RootObj MerkleProof*[H, K] = ref object of RootObj
index*: int # linear index of the leaf, starting from 0 index*: int # linear index of the leaf, starting from 0
@ -30,33 +69,99 @@ type
compress*: CompressFn[H, K] # compress function compress*: CompressFn[H, K] # compress function
zero*: H # zero value zero*: H # zero value
func levels*[H, K](self: MerkleTree[H, K]): int =
return self.layerOffsets.len
func depth*[H, K](self: MerkleTree[H, K]): int = func depth*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len - 1 return self.levels() - 1
func nodesInLayer(offsets: openArray[int], layer: int): int =
if layer == offsets.high:
1
else:
offsets[layer + 1] - offsets[layer]
func nodesInLayer(self: MerkleTree | MerkleTreeObj, layer: int): int =
self.layerOffsets.nodesInLayer(layer)
func leavesCount*[H, K](self: MerkleTree[H, K]): int = func leavesCount*[H, K](self: MerkleTree[H, K]): int =
return self.layers[0].len return self.nodesInLayer(0)
func levels*[H, K](self: MerkleTree[H, K]): int = func nodesPerLevel(nleaves: int): seq[int] =
return self.layers.len ## Given a number of leaves, return a seq with the number of nodes at each
## layer of the tree (from the bottom/leaves to the root)
##
## Ie For a tree of 4 leaves, return `[4, 2, 1]`
if nleaves <= 0:
return @[]
elif nleaves == 1:
return @[1, 1] # leaf and root
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] = var nodes: seq[int] = @[]
return self.layers[0] var m = nleaves
while true:
nodes.add(m)
if m == 1:
break
# Next layer size is ceil(m/2)
m = (m + 1) shr 1
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] = nodes
for layer in self.layers:
yield layer func layerOffsets(nleaves: int): seq[int] =
## Given a number of leaves, return a seq of the starting offsets of each
## layer in the node store that results from flattening the binary tree
##
## Ie For a tree of 4 leaves, return `[0, 4, 6]`
let nodes = nodesPerLevel(nleaves)
var tot = 0
let offsets = nodes.mapIt:
let cur = tot
tot += it
cur
offsets
template nodeData(self: MerkleTreeObj, i, j: int): openArray[byte] =
## Bytes of the j'th node of the i'th level in the tree, starting with the
## leaves (at level 0).
self.store.nodeData(self.layerOffsets, self.compress.nodeSize, i, j)
func layer*[H, K](
self: MerkleTree[H, K], layer: int
): seq[H] {.deprecated: "Expensive".} =
var nodes = newSeq[H](self.nodesInLayer(layer))
for i, h in nodes.mpairs:
assign(h, self[].nodeData(layer, i))
return nodes
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} =
self.layer(0)
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} =
for i in 0 ..< self.layerOffsets.len:
yield self.layer(i)
proc layers*[H, K](self: MerkleTree[H, K]): seq[seq[H]] {.deprecated: "Expensive".} =
for l in self.layers():
result.add l
iterator nodes*[H, K](self: MerkleTree[H, K]): H = iterator nodes*[H, K](self: MerkleTree[H, K]): H =
for layer in self.layers: ## Iterate over the nodes of each layer starting with the leaves
for node in layer: var node: H
for i in 0 ..< self.layerOffsets.len:
let nodesInLayer = self.nodesInLayer(i)
for j in 0 ..< nodesInLayer:
assign(node, self[].nodeData(i, j))
yield node yield node
func root*[H, K](self: MerkleTree[H, K]): ?!H = func root*[H, K](self: MerkleTree[H, K]): ?!H =
let last = self.layers[^1] mixin assign
if last.len != 1: if self.layerOffsets.len == 0:
return failure "invalid tree" return failure "invalid tree"
return success last[0] var h: H
assign(h, self[].nodeData(self.layerOffsets.high(), 0))
return success h
func getProof*[H, K]( func getProof*[H, K](
self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K] self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
@ -72,18 +177,19 @@ func getProof*[H, K](
var m = nleaves var m = nleaves
for i in 0 ..< depth: for i in 0 ..< depth:
let j = k xor 1 let j = k xor 1
path[i] =
if (j < m): if (j < m):
self.layers[i][j] assign(path[i], self[].nodeData(i, j))
else: else:
self.zero path[i] = self.compress.zero
k = k shr 1 k = k shr 1
m = (m + 1) shr 1 m = (m + 1) shr 1
proof.index = index proof.index = index
proof.path = path proof.path = path
proof.nleaves = nleaves proof.nleaves = nleaves
proof.compress = self.compress proof.compress = self.compress.fn
success() success()
@ -122,32 +228,169 @@ func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool = func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
success bool(root == ?proof.reconstructRoot(leaf)) success bool(root == ?proof.reconstructRoot(leaf))
func merkleTreeWorker*[H, K]( func fromNodes*[H, K](
self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool self: MerkleTree[H, K],
): ?!seq[seq[H]] = compressor: CompressFn,
let a = low(xs) zero: H,
let b = high(xs) nodes: openArray[H],
let m = b - a + 1 nleaves: int,
): ?!void =
mixin assign
if nodes.len < 2: # At least leaf and root
return failure "Not enough nodes"
if nleaves == 0:
return failure "No leaves"
self.compress = CompressData[H, K](fn: compressor, nodeSize: nodes[0].len, zero: zero)
self.layerOffsets = layerOffsets(nleaves)
if self.layerOffsets[^1] + 1 != nodes.len:
return failure "bad node count"
self.store = newSeqUninit[byte](nodes.len * self.compress.nodeSize)
for i in 0 ..< nodes.len:
assign(
self[].store.toOpenArray(
i * self.compress.nodeSize, (i + 1) * self.compress.nodeSize - 1
),
nodes[i],
)
success()
func merkleTreeWorker[H, K](
store: var openArray[byte],
offsets: openArray[int],
compress: CompressData[H, K],
layer: int,
isBottomLayer: static bool,
): ?!void =
## Worker used to compute the merkle tree from the leaves that are assumed to
## already be stored at the beginning of the `store`, as done by `prepare`.
# Throughout, we use `assign` to convert from H to bytes and back, assuming
# this assignment can be done somewhat efficiently (ie memcpy) - because
# the code must work with multihash where len(H) is can differ, we cannot
# simply use a fixed-size array here.
mixin assign
template nodeData(i, j: int): openArray[byte] =
# Pick out the bytes of node j in layer i
store.nodeData(offsets, compress.nodeSize, i, j)
let m = offsets.nodesInLayer(layer)
when not isBottomLayer: when not isBottomLayer:
if m == 1: if m == 1:
return success @[@xs] return success()
let halfn: int = m div 2 let halfn: int = m div 2
let n: int = 2 * halfn let n: int = 2 * halfn
let isOdd: bool = (n != m) let isOdd: bool = (n != m)
var ys: seq[H] # Because the compression function we work with works with H and not bytes,
if not isOdd: # we need to extract H from the raw data - a little abstraction tax that
ys = newSeq[H](halfn) # ensures that properties like alignment of H are respected.
else: var a, b, tmp: H
ys = newSeq[H](halfn + 1)
for i in 0 ..< halfn: for i in 0 ..< halfn:
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key)
assign(a, nodeData(layer, i * 2))
assign(b, nodeData(layer, i * 2 + 1))
tmp = ?compress.fn(a, b, key = key)
assign(nodeData(layer + 1, i), tmp)
if isOdd: if isOdd:
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
ys[halfn] = ?self.compress(xs[n], self.zero, key = key)
success @[@xs] & ?self.merkleTreeWorker(ys, isBottomLayer = false) assign(a, nodeData(layer, n))
tmp = ?compress.fn(a, compress.zero, key = key)
assign(nodeData(layer + 1, halfn), tmp)
merkleTreeWorker(store, offsets, compress, layer + 1, false)
proc merkleTreeWorker[H, K](
store: SharedBuf[byte],
offsets: SharedBuf[int],
compress: ptr CompressData[H, K],
signal: ThreadSignalPtr,
): bool =
defer:
discard signal.fireSync()
let res = merkleTreeWorker(
store.toOpenArray(), offsets.toOpenArray(), compress[], 0, isBottomLayer = true
)
return res.isOk()
func prepare*[H, K](
self: MerkleTree[H, K], compressor: CompressFn, zero: H, leaves: openArray[H]
): ?!void =
## Prepare the instance for computing the merkle tree of the given leaves using
## the given compression function. After preparation, `compute` should be
## called to perform the actual computation. `leaves` will be copied into the
## tree so they can be freed after the call.
if leaves.len == 0:
return failure "No leaves"
self.compress =
CompressData[H, K](fn: compressor, nodeSize: leaves[0].len, zero: zero)
self.layerOffsets = layerOffsets(leaves.len)
self.store = newSeqUninit[byte]((self.layerOffsets[^1] + 1) * self.compress.nodeSize)
for j in 0 ..< leaves.len:
assign(self[].nodeData(0, j), leaves[j])
return success()
proc compute*[H, K](self: MerkleTree[H, K]): ?!void =
merkleTreeWorker(
self.store, self.layerOffsets, self.compress, 0, isBottomLayer = true
)
proc compute*[H, K](
self: MerkleTree[H, K], tp: Taskpool
): Future[?!void] {.async: (raises: []).} =
if tp.numThreads == 1:
# With a single thread, there's no point creating a separate task
return self.compute()
# TODO this signal would benefit from reuse across computations
without signal =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
signal.close().expect("closing once works")
let res = tp.spawn merkleTreeWorker(
SharedBuf.view(self.store),
SharedBuf.view(self.layerOffsets),
addr self.compress,
signal,
)
# To support cancellation, we'd have to ensure the task we posted to taskpools
# exits early - since we're not doing that, block cancellation attempts
try:
await noCancel signal.wait()
except AsyncError as exc:
# Since we initialized the signal, the OS or chronos is misbehaving. In any
# case, it would mean the task is still running which would cause a memory
# a memory violation if we let it run - panic instead
raiseAssert "Could not wait for signal, was it initialized? " & exc.msg
if not res.sync():
return failure("merkle tree task failed")
return success()

View File

@ -1,130 +0,0 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sequtils
import pkg/poseidon2
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results
import ../utils
import ../rng
import ./merkletree
export merkletree, poseidon2
const
KeyNoneF = F.fromHex("0x0")
KeyBottomLayerF = F.fromHex("0x1")
KeyOddF = F.fromHex("0x2")
KeyOddAndBottomLayerF = F.fromHex("0x3")
Poseidon2Zero* = zero
type
Bn254Fr* = F
Poseidon2Hash* = Bn254Fr
PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
KeyNone
KeyBottomLayer
KeyOdd
KeyOddAndBottomLayer
Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]
proc `$`*(self: Poseidon2Tree): string =
let root = if self.root.isOk: self.root.get.toHex else: "none"
"Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels & " )"
proc `$`*(self: Poseidon2Proof): string =
"Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
", path: " & $self.path.mapIt(it.toHex) & " )"
func toArray32*(bytes: openArray[byte]): array[32, byte] =
result[0 ..< bytes.len] = bytes[0 ..< bytes.len]
converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
case key
of KeyNone: KeyNoneF
of KeyBottomLayer: KeyBottomLayerF
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
if leaves.len == 0:
return failure "Empty leaves"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))
proc fromNodes*(
_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
if nodes.len == 0:
return failure "Empty nodes"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
self = Poseidon2Tree(compress: compressor, zero: zero)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ?self.getProof(index)
if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
return failure "Unable to verify tree built from nodes"
success self
func init*(
_: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
if nodes.len == 0:
return failure "Empty nodes"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
success Poseidon2Proof(
compress: compressor,
zero: Poseidon2Zero,
index: index,
nleaves: nleaves,
path: @nodes,
)

View File

@ -0,0 +1,2 @@
const CodecExts =
[("codex-manifest", 0xCD01), ("codex-block", 0xCD02), ("codex-root", 0xCD03)]

19
codex/multihash_exts.nim Normal file
View File

@ -0,0 +1,19 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/logos-storage/logos-storage-nim/issues/1162
if len(output) > 0:
let digest = hashes.sha256.hash(data)
copyMem(addr output[0], addr digest[0], 32)
const Sha2256MultiHash* = MHash(
mcodec: multiCodec("sha2-256"),
size: sha256.sizeDigest,
coder: sha2_256hash_constantine,
)
const HashExts = [
# override sha2-256 hash function
Sha2256MultiHash
]

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -10,10 +10,10 @@
import import
std/[options, os, strutils, times, net, atomics], std/[options, os, strutils, times, net, atomics],
stew/shims/net as stewNet, stew/[objects],
stew/[objects, results],
nat_traversal/[miniupnpc, natpmp], nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net json_serialization/std/net,
results
import pkg/chronos import pkg/chronos
import pkg/chronicles import pkg/chronicles

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -19,7 +19,6 @@ import pkg/taskpools
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/chronos import pkg/chronos
import pkg/poseidon2
import pkg/libp2p/[switch, multicodec, multihash] import pkg/libp2p/[switch, multicodec, multihash]
import pkg/libp2p/stream/bufferstream import pkg/libp2p/stream/bufferstream
@ -29,7 +28,6 @@ import pkg/libp2p/routing_record
import pkg/libp2p/signed_envelope import pkg/libp2p/signed_envelope
import ./chunker import ./chunker
import ./slots
import ./clock import ./clock
import ./blocktype as bt import ./blocktype as bt
import ./manifest import ./manifest
@ -37,14 +35,11 @@ import ./merkletree
import ./stores import ./stores
import ./blockexchange import ./blockexchange
import ./streams import ./streams
import ./erasure
import ./discovery import ./discovery
import ./contracts
import ./indexingstrategy
import ./utils import ./utils
import ./errors import ./errors
import ./logutils import ./logutils
import ./utils/asynciter import ./utils/safeasynciter
import ./utils/trackedfutures import ./utils/trackedfutures
export logutils export logutils
@ -52,35 +47,28 @@ export logutils
logScope: logScope:
topics = "codex node" topics = "codex node"
const DefaultFetchBatch = 10 const
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type type
Contracts* =
tuple[
client: ?ClientInteractions,
host: ?HostInteractions,
validator: ?ValidatorInteractions,
]
CodexNode* = object CodexNode* = object
switch: Switch switch: Switch
networkId: PeerId networkId: PeerId
networkStore: NetworkStore networkStore: NetworkStore
engine: BlockExcEngine engine: BlockExcEngine
prover: ?Prover
discovery: Discovery discovery: Discovery
contracts*: Contracts
clock*: Clock clock*: Clock
storage*: Contracts taskPool: Taskpool
taskpool: Taskpool
trackedFutures: TrackedFutures trackedFutures: TrackedFutures
CodexNodeRef* = ref CodexNode CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].} OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {. BatchProc* =
gcsafe, async: (raises: [CancelledError]) proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
.} OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: CodexNodeRef): Switch = func switch*(self: CodexNodeRef): Switch =
return self.switch return self.switch
@ -186,34 +174,62 @@ proc fetchBatched*(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i)) # (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# ) # )
while not iter.finished: # Sliding window: maintain batchSize blocks in-flight
let blockFutures = collect: let
for i in 0 ..< batchSize: refillThreshold = int(float(batchSize) * BatchRefillThreshold)
if not iter.finished: refillSize = max(refillThreshold, 1)
let address = BlockAddress.init(cid, iter.next()) maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
if blockFutures.len == 0: var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
var blockResults = await self.networkStore.getBlocks(addresses)
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue continue
without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err: inc(successfulBlocks)
trace "Some blocks failed to fetch", err = err.msg inc(completedInWindow)
return failure(err)
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value) if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
let numOfFailedBlocks = blockResults.len - blocks.len if completedInWindow >= refillThreshold and not iter.finished:
if numOfFailedBlocks > 0: var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
return for i in 0 ..< refillSize:
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")") if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption: if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr) return failure(batchErr)
if not iter.finished:
await sleepAsync(1.millis)
success() success()
proc fetchBatched*( proc fetchBatched*(
@ -288,20 +304,6 @@ proc streamEntireDataset(
var jobs: seq[Future[void]] var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false)) let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
if manifest.protected:
# Retrieve, decode and save to the local store all EС groups
proc erasureJob(): Future[void] {.async: (raises: []).} =
try:
# Spawn an erasure decoding job
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CatchableError as exc:
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
jobs.add(erasureJob())
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false)) jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
@ -403,6 +405,7 @@ proc store*(
filename: ?string = string.none, filename: ?string = string.none,
mimetype: ?string = string.none, mimetype: ?string = string.none,
blockSize = DefaultBlockSize, blockSize = DefaultBlockSize,
onBlockStored: OnBlockStoredProc = nil,
): Future[?!Cid] {.async.} = ): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize ## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest ## to nodes's BlockStore, and return Cid of its manifest
@ -432,6 +435,9 @@ proc store*(
if err =? (await self.networkStore.putBlock(blk)).errorOption: if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}") return failure(&"Unable to store block {blk.cid}")
if not onBlockStored.isNil:
onBlockStored(chunk)
except CancelledError as exc: except CancelledError as exc:
raise exc raise exc
except CatchableError as exc: except CatchableError as exc:
@ -439,7 +445,7 @@ proc store*(
finally: finally:
await stream.close() await stream.close()
without tree =? CodexTree.init(cids), err: without tree =? (await CodexTree.init(self.taskPool, cids)), err:
return failure(err) return failure(err)
without treeCid =? tree.rootCid(CIDv1, dataCodec), err: without treeCid =? tree.rootCid(CIDv1, dataCodec), err:
@ -495,286 +501,11 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
onManifest(cid, manifest) onManifest(cid, manifest)
proc setupRequest(
self: CodexNodeRef,
cid: Cid,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!StorageRequest] {.async.} =
## Setup slots for a given dataset
##
let
ecK = nodes - tolerance
ecM = tolerance
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateralPerByte = collateralPerByte
expiry = expiry
ecK = ecK
ecM = ecM
trace "Setting up slots"
without manifest =? await self.fetchManifest(cid), error:
trace "Unable to fetch manifest for cid"
return failure error
# Erasure code the dataset according to provided parameters
let erasure = Erasure.new(
self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
return failure(error)
without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err:
trace "Unable to create slot builder"
return failure(err)
without verifiable =? (await builder.buildManifest()), err:
trace "Unable to build verifiable manifest"
return failure(err)
without manifestBlk =? await self.storeManifest(verifiable), err:
trace "Unable to store verifiable manifest"
return failure(err)
let
verifyRoot =
if builder.verifyRoot.isNone:
return failure("No slots root")
else:
builder.verifyRoot.get.toBytes
request = StorageRequest(
ask: StorageAsk(
slots: verifiable.numSlots.uint64,
slotSize: builder.slotBytes.uint64,
duration: duration,
proofProbability: proofProbability,
pricePerBytePerSecond: pricePerBytePerSecond,
collateralPerByte: collateralPerByte,
maxSlotLoss: tolerance,
),
content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot),
expiry: expiry,
)
trace "Request created", request = $request
success request
proc requestStorage*(
self: CodexNodeRef,
cid: Cid,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!PurchaseId] {.async.} =
## Initiate a request for storage sequence, this might
## be a multistep procedure.
##
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateralPerByte = collateralPerByte
expiry = expiry
now = self.clock.now
trace "Received a request for storage!"
without contracts =? self.contracts.client:
trace "Purchasing not available"
return failure "Purchasing not available"
without request =? (
await self.setupRequest(
cid, duration, proofProbability, nodes, tolerance, pricePerBytePerSecond,
collateralPerByte, expiry,
)
), err:
trace "Unable to setup request"
return failure err
let purchase = await contracts.purchasing.purchase(request)
success purchase.id
proc onStore(
self: CodexNodeRef,
request: StorageRequest,
expiry: SecondsSince1970,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
): Future[?!void] {.async: (raises: [CancelledError]).} =
## store data in local storage
##
let cid = request.content.cid
logScope:
cid = $cid
slotIdx = slotIdx
trace "Received a request to store a slot"
# TODO: Use the isRepairing to manage the slot download.
# If isRepairing is true, the slot has to be repaired before
# being downloaded.
without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)
without builder =?
Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)
if slotIdx > manifest.slotRoots.high.uint64:
trace "Slot index not in manifest", slotIdx
return failure(newException(CodexError, "Slot index not in manifest"))
proc updateExpiry(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let res = await allFinishedFailed[?!void](ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg
return failure(err)
return success()
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return
without blksIter =? indexer.getIndicies(slotIdx.int).catch, err:
trace "Unable to get indicies from strategy", err = err.msg
return failure(err)
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
trace "Unable to build slot", err = err.msg
return failure(err)
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
return failure(newException(CodexError, "Slot root mismatch"))
trace "Slot successfully retrieved and reconstructed"
return success()
proc onProve(
self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
## Generats a proof for a given slot and challenge
##
let
cidStr = $slot.request.content.cid
slotIdx = slot.slotIndex
logScope:
cid = cidStr
slot = slotIdx
challenge = challenge
trace "Received proof challenge"
if prover =? self.prover:
trace "Prover enabled"
without cid =? Cid.init(cidStr).mapFailure, err:
error "Unable to parse Cid", cid, err = err.msg
return failure(err)
without manifest =? await self.fetchManifest(cid), err:
error "Unable to fetch manifest for cid", err = err.msg
return failure(err)
when defined(verify_circuit):
without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge),
err:
error "Unable to generate proof", err = err.msg
return failure(err)
without checked =? await prover.verify(proof, inputs), err:
error "Unable to verify proof", err = err.msg
return failure(err)
if not checked:
error "Proof verification failed"
return failure("Proof verification failed")
trace "Proof verified successfully"
else:
without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err:
error "Unable to generate proof", err = err.msg
return failure(err)
let groth16Proof = proof.toGroth16Proof()
trace "Proof generated successfully", groth16Proof
success groth16Proof
else:
warn "Prover not enabled"
failure "Prover not enabled"
proc onExpiryUpdate( proc onExpiryUpdate(
self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970 self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} = ): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateExpiry(rootCid, expiry) return await self.updateExpiry(rootCid, expiry)
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
discard
proc start*(self: CodexNodeRef) {.async.} = proc start*(self: CodexNodeRef) {.async.} =
if not self.engine.isNil: if not self.engine.isNil:
await self.engine.start() await self.engine.start()
@ -785,66 +516,12 @@ proc start*(self: CodexNodeRef) {.async.} =
if not self.clock.isNil: if not self.clock.isNil:
await self.clock.start() await self.clock.start()
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onStore(request, expiry, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
self.onClear(request, slotIndex)
hostContracts.sales.onProve = proc(
slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
# TODO: generate proof
self.onProve(slot, challenge)
try:
await hostContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start host contract interactions", error = error.msg
self.contracts.host = HostInteractions.none
if clientContracts =? self.contracts.client:
try:
await clientContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start client contract interactions: ", error = error.msg
self.contracts.client = ClientInteractions.none
if validatorContracts =? self.contracts.validator:
try:
await validatorContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start validator contract interactions: ", error = error.msg
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} = proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node" trace "Stopping node"
if not self.taskpool.isNil:
self.taskpool.shutdown()
await self.trackedFutures.cancelTracked() await self.trackedFutures.cancelTracked()
if not self.engine.isNil: if not self.engine.isNil:
@ -853,18 +530,10 @@ proc stop*(self: CodexNodeRef) {.async.} =
if not self.discovery.isNil: if not self.discovery.isNil:
await self.discovery.stop() await self.discovery.stop()
if clientContracts =? self.contracts.client:
await clientContracts.stop()
if hostContracts =? self.contracts.host:
await hostContracts.stop()
if validatorContracts =? self.contracts.validator:
await validatorContracts.stop()
if not self.clock.isNil: if not self.clock.isNil:
await self.clock.stop() await self.clock.stop()
proc close*(self: CodexNodeRef) {.async.} =
if not self.networkStore.isNil: if not self.networkStore.isNil:
await self.networkStore.close await self.networkStore.close
@ -875,8 +544,6 @@ proc new*(
engine: BlockExcEngine, engine: BlockExcEngine,
discovery: Discovery, discovery: Discovery,
taskpool: Taskpool, taskpool: Taskpool,
prover = Prover.none,
contracts = Contracts.default,
): CodexNodeRef = ): CodexNodeRef =
## Create new instance of a Codex self, call `start` to run it ## Create new instance of a Codex self, call `start` to run it
## ##
@ -885,9 +552,14 @@ proc new*(
switch: switch, switch: switch,
networkStore: networkStore, networkStore: networkStore,
engine: engine, engine: engine,
prover: prover,
discovery: discovery, discovery: discovery,
taskPool: taskpool, taskPool: taskpool,
contracts: contracts,
trackedFutures: TrackedFutures(), trackedFutures: TrackedFutures(),
) )
proc hasLocalBlock*(
self: CodexNodeRef, cid: Cid
): Future[bool] {.async: (raises: [CancelledError]).} =
## Returns true if the given Cid is present in the local store
return await (cid in self.networkStore.localStore)

View File

@ -1,17 +0,0 @@
import pkg/stint
type
Periodicity* = object
seconds*: uint64
Period* = uint64
Timestamp* = uint64
func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
timestamp div periodicity.seconds
func periodStart*(periodicity: Periodicity, period: Period): Timestamp =
period * periodicity.seconds
func periodEnd*(periodicity: Periodicity, period: Period): Timestamp =
periodicity.periodStart(period + 1)

View File

@ -1,74 +0,0 @@
import std/tables
import pkg/stint
import pkg/chronos
import pkg/questionable
import pkg/nimcrypto
import ./market
import ./clock
import ./purchasing/purchase
export questionable
export chronos
export market
export purchase
type
Purchasing* = ref object
market*: Market
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256
PurchaseTimeout* = Timeout
const DefaultProofProbability = 100.u256
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)
proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
let requestIds = await market.myRequests()
for requestId in requestIds:
let purchase = Purchase.new(requestId, purchasing.market, purchasing.clock)
purchase.load()
purchasing.purchases[purchase.id] = purchase
proc start*(purchasing: Purchasing) {.async.} =
await purchasing.load()
proc stop*(purchasing: Purchasing) {.async.} =
discard
proc populate*(
purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
result.ask.proofProbability = purchasing.proofProbability
if result.nonce == Nonce.default:
var id = result.nonce.toArray
doAssert randomBytes(id) == 32
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()
proc purchase*(
purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
purchase.start()
purchasing.purchases[purchase.id] = purchase
return purchase
func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase =
if purchasing.purchases.hasKey(id):
some purchasing.purchases[id]
else:
none Purchase
func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
var pIds: seq[PurchaseId] = @[]
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds

View File

@ -1,74 +0,0 @@
import ./statemachine
import ./states/pending
import ./states/unknown
import ./purchaseid
# Purchase is implemented as a state machine.
#
# It can either be a new (pending) purchase that still needs to be submitted
# on-chain, or it is a purchase that was previously submitted on-chain, and
# we're just restoring its (unknown) state after a node restart.
#
# |
# v
# ------------------------- unknown
# | / /
# v v /
# pending ----> submitted ----> started ---------> finished <----/
# \ \ /
# \ ------------> failed <----/
# \ /
# --> cancelled <-----------------------
export Purchase
export purchaseid
export statemachine
func new*(
_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
var purchase = Purchase.new()
{.cast(noSideEffect).}:
purchase.future = newFuture[void]()
purchase.requestId = requestId
purchase.market = market
purchase.clock = clock
return purchase
func new*(
_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
purchase.request = some request
return purchase
proc start*(purchase: Purchase) =
purchase.start(PurchasePending())
proc load*(purchase: Purchase) =
purchase.start(PurchaseUnknown())
proc wait*(purchase: Purchase) {.async.} =
await purchase.future
func id*(purchase: Purchase): PurchaseId =
PurchaseId(purchase.requestId)
func finished*(purchase: Purchase): bool =
purchase.future.finished
func error*(purchase: Purchase): ?(ref CatchableError) =
if purchase.future.failed:
some purchase.future.error
else:
none (ref CatchableError)
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state
purchase.query(description)

View File

@ -1,14 +0,0 @@
import std/hashes
import ../logutils
type PurchaseId* = distinct array[32, byte]
logutils.formatIt(LogFormat.textLines, PurchaseId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
it.to0xHexLog
proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string =
array[32, byte](x).toHex

View File

@ -1,19 +0,0 @@
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../errors
export market
export clock
export asyncstatemachine
type
Purchase* = ref object of Machine
future*: Future[void]
market*: Market
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest
PurchaseState* = ref object of State
PurchaseError* = object of CodexError

View File

@ -1,35 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./error
declareCounter(codex_purchases_cancelled, "codex purchases cancelled")
logScope:
topics = "marketplace purchases cancelled"
type PurchaseCancelled* = ref object of PurchaseState
method `$`*(state: PurchaseCancelled): string =
"cancelled"
method run*(
state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_cancelled.inc()
let purchase = Purchase(machine)
try:
warn "Request cancelled, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
let error = newException(Timeout, "Purchase cancelled due to timeout")
purchase.future.fail(error)
except CancelledError as e:
trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseCancelled.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,26 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
declareCounter(codex_purchases_error, "codex purchases error")
logScope:
topics = "marketplace purchases errored"
type PurchaseErrored* = ref object of PurchaseState
error*: ref CatchableError
method `$`*(state: PurchaseErrored): string =
"errored"
method run*(
state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_error.inc()
let purchase = Purchase(machine)
error "Purchasing error",
error = state.error.msgDetail, requestId = purchase.requestId
purchase.future.fail(state.error)

View File

@ -1,30 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error
declareCounter(codex_purchases_failed, "codex purchases failed")
type PurchaseFailed* = ref object of PurchaseState
method `$`*(state: PurchaseFailed): string =
"failed"
method run*(
state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_failed.inc()
let purchase = Purchase(machine)
try:
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
except CancelledError as e:
trace "PurchaseFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFailed.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
let error = newException(PurchaseError, "Purchase failed")
return some State(PurchaseErrored(error: error))

View File

@ -1,33 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error
declareCounter(codex_purchases_finished, "codex purchases finished")
logScope:
topics = "marketplace purchases finished"
type PurchaseFinished* = ref object of PurchaseState
method `$`*(state: PurchaseFinished): string =
"finished"
method run*(
state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_finished.inc()
let purchase = Purchase(machine)
try:
info "Purchase finished, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
purchase.future.complete()
except CancelledError as e:
trace "PurchaseFinished.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFinished.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,28 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./submitted
import ./error
declareCounter(codex_purchases_pending, "codex purchases pending")
type PurchasePending* = ref object of PurchaseState
method `$`*(state: PurchasePending): string =
"pending"
method run*(
state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_pending.inc()
let purchase = Purchase(machine)
try:
let request = !purchase.request
await purchase.market.requestStorage(request)
return some State(PurchaseSubmitted())
except CancelledError as e:
trace "PurchasePending.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchasePending.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,54 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_started, "codex purchases started")
logScope:
topics = "marketplace purchases started"
type PurchaseStarted* = ref object of PurchaseState
method `$`*(state: PurchaseStarted): string =
"started"
method run*(
state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_started.inc()
let purchase = Purchase(machine)
let clock = purchase.clock
let market = purchase.market
info "All required slots filled, purchase started", requestId = purchase.requestId
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()
var ended: Future[void]
try:
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
# Ensure that we're past the request end by waiting an additional second
ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
let fut = await one(ended, failed)
await subscription.unsubscribe()
if fut.id == failed.id:
ended.cancelSoon()
return some State(PurchaseFailed())
else:
failed.cancelSoon()
return some State(PurchaseFinished())
except CancelledError as e:
ended.cancelSoon()
failed.cancelSoon()
trace "PurchaseStarted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseStarted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,56 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./started
import ./cancelled
import ./error
logScope:
topics = "marketplace purchases submitted"
declareCounter(codex_purchases_submitted, "codex purchases submitted")
type PurchaseSubmitted* = ref object of PurchaseState
method `$`*(state: PurchaseSubmitted): string =
"submitted"
method run*(
state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_submitted.inc()
let purchase = Purchase(machine)
let request = !purchase.request
let market = purchase.market
let clock = purchase.clock
info "Request submitted, waiting for slots to be filled",
requestId = purchase.requestId
proc wait() {.async.} =
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.fire()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done.wait()
await subscription.unsubscribe()
proc withTimeout(future: Future[void]) {.async.} =
let expiry = (await market.requestExpiresAt(request.id)) + 1
trace "waiting for request fulfillment or expiry", expiry
await future.withTimeout(clock, expiry)
try:
await wait().withTimeout()
except Timeout:
return some State(PurchaseCancelled())
except CancelledError as e:
trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseSubmitted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
return some State(PurchaseStarted())

View File

@ -1,44 +0,0 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_unknown, "codex purchases unknown")
type PurchaseUnknown* = ref object of PurchaseState
method `$`*(state: PurchaseUnknown): string =
"unknown"
method run*(
state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
try:
codex_purchases_unknown.inc()
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
purchase.request = some request
case requestState
of RequestState.New:
return some State(PurchaseSubmitted())
of RequestState.Started:
return some State(PurchaseStarted())
of RequestState.Cancelled:
return some State(PurchaseCancelled())
of RequestState.Finished:
return some State(PurchaseFinished())
of RequestState.Failed:
return some State(PurchaseFailed())
except CancelledError as e:
trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseUnknown.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import std/sequtils import std/sequtils
import std/mimetypes import std/mimetypes
@ -33,8 +30,6 @@ import ../logutils
import ../node import ../node
import ../blocktype import ../blocktype
import ../conf import ../conf
import ../contracts
import ../erasure/erasure
import ../manifest import ../manifest
import ../streams/asyncstreamwrapper import ../streams/asyncstreamwrapper
import ../stores import ../stores
@ -119,9 +114,7 @@ proc retrieveCid(
# For erasure-coded datasets, we need to return the _original_ length; i.e., # For erasure-coded datasets, we need to return the _original_ length; i.e.,
# the length of the non-erasure-coded dataset, as that's what we will be # the length of the non-erasure-coded dataset, as that's what we will be
# returning to the client. # returning to the client.
let contentLength = resp.setHeader("Content-Length", $(manifest.datasetSize.int))
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
resp.setHeader("Content-Length", $(contentLength.int))
await resp.prepare(HttpResponseStreamType.Plain) await resp.prepare(HttpResponseStreamType.Plain)
@ -183,7 +176,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) = proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion
router.api(MethodOptions, "/api/codex/v1/data") do( router.api(MethodOptions, "/api/storage/v1/data") do(
resp: HttpResponseRef resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
if corsOrigin =? allowedOrigin: if corsOrigin =? allowedOrigin:
@ -195,7 +188,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse: router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner ## Upload a file in a streaming manner
## ##
@ -257,11 +250,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
finally: finally:
await reader.closeWait() await reader.closeWait()
router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse:
let json = await formatManifestBlocks(node) let json = await formatManifestBlocks(node)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodOptions, "/api/codex/v1/data/{cid}") do( router.api(MethodOptions, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
if corsOrigin =? allowedOrigin: if corsOrigin =? allowedOrigin:
@ -270,7 +263,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.api(MethodGet, "/api/codex/v1/data/{cid}") do( router.api(MethodGet, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -286,7 +279,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
await node.retrieveCid(cid.get(), local = true, resp = resp) await node.retrieveCid(cid.get(), local = true, resp = resp)
router.api(MethodDelete, "/api/codex/v1/data/{cid}") do( router.api(MethodDelete, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Deletes either a single block or an entire dataset ## Deletes either a single block or an entire dataset
@ -307,7 +300,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Download a file from the network to the local node ## Download a file from the network to the local node
@ -328,7 +321,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest) let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do( router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Download a file from the network in a streaming ## Download a file from the network in a streaming
@ -347,7 +340,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition") resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp) await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Download only the manifest. ## Download only the manifest.
@ -365,7 +358,23 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest) let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Only test if the give CID is available in the local store
##
var headers = buildCorsHeaders("GET", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
let cid = cid.get()
let hasCid = await node.hasLocalBlock(cid)
let json = %*{$cid: hasCid}
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
let json = let json =
%RestRepoStore( %RestRepoStore(
totalBlocks: repoStore.totalBlocks, totalBlocks: repoStore.totalBlocks,
@ -375,465 +384,12 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
) )
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
## Returns active slots for the host
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
let json = %(await contracts.sales.mySlots())
return RestApiResponse.response(
$json, contentType = "application/json", headers = headers
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
slotId: SlotId
) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the
## slot is not active for the host.
var headers = buildCorsHeaders("GET", allowedOrigin)
without contracts =? node.contracts.host:
return
RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
without slotId =? slotId.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
without agent =? await contracts.sales.activeSale(slotId):
return
RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)
let restAgent = RestSalesAgent(
state: agent.state() |? "none",
slotIndex: agent.data.slotIndex,
requestId: agent.data.requestId,
request: agent.data.request,
reservation: agent.data.reservation,
)
return RestApiResponse.response(
restAgent.toJson, contentType = "application/json", headers = headers
)
router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
without avails =? (await contracts.sales.context.reservations.all(Availability)),
err:
return RestApiResponse.error(Http500, err.msg, headers = headers)
let json = %avails
return RestApiResponse.response(
$json, contentType = "application/json", headers = headers
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is
## returned to the availability.
##
## totalSize - size of available storage in bytes
## duration - maximum time the storage should be sold for (in seconds)
## minPricePerBytePerSecond - minimal price per byte paid (in amount of
## tokens) to be matched against the request's pricePerBytePerSecond
## totalCollateral - total collateral (in amount of
## tokens) that can be distributed among matching requests
var headers = buildCorsHeaders("POST", allowedOrigin)
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
let body = await request.getBody()
without restAv =? RestAvailability.fromJson(body), error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
let reservations = contracts.sales.context.reservations
if restAv.totalSize == 0:
return RestApiResponse.error(
Http422, "Total size must be larger then zero", headers = headers
)
if restAv.duration == 0:
return RestApiResponse.error(
Http422, "duration must be larger then zero", headers = headers
)
if restAv.minPricePerBytePerSecond == 0:
return RestApiResponse.error(
Http422,
"minPricePerBytePerSecond must be larger then zero",
headers = headers,
)
if restAv.totalCollateral == 0:
return RestApiResponse.error(
Http422, "totalCollateral must be larger then zero", headers = headers
)
if not reservations.hasAvailable(restAv.totalSize):
return
RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)
without availability =? (
await reservations.createAvailability(
restAv.totalSize,
restAv.duration,
restAv.minPricePerBytePerSecond,
restAv.totalCollateral,
enabled = restAv.enabled |? true,
until = restAv.until |? 0,
)
), error:
if error of CancelledError:
raise error
if error of UntilOutOfBoundsError:
return RestApiResponse.error(Http422, error.msg)
return RestApiResponse.error(Http500, error.msg, headers = headers)
return RestApiResponse.response(
availability.toJson,
Http201,
contentType = "application/json",
headers = headers,
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do(
id: AvailabilityId, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
resp.setCorsHeaders("PATCH", corsOrigin)
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do(
  id: AvailabilityId
) -> RestApiResponse:
  ## Updates Availability.
  ## The new parameters will be only considered for new requests.
  ## Existing Requests linked to this Availability will continue as is.
  ##
  ## totalSize - size of available storage in bytes.
  ##             When decreasing the size, the lower limit is
  ##             the current `totalSize - freeSize`.
  ## duration - maximum time the storage should be sold for (in seconds)
  ## minPricePerBytePerSecond - minimal price per byte paid (in amount of
  ##    tokens) to be matched against the request's pricePerBytePerSecond
  ## totalCollateral - total collateral (in amount of
  ##    tokens) that can be distributed among matching requests
  try:
    without contracts =? node.contracts.host:
      return RestApiResponse.error(Http503, "Persistence is not enabled")
    # Parse the raw path parameter, then derive the datastore key for it.
    without id =? id.tryGet.catch, error:
      return RestApiResponse.error(Http400, error.msg)
    without keyId =? id.key.tryGet.catch, error:
      return RestApiResponse.error(Http400, error.msg)
    let
      body = await request.getBody()
      reservations = contracts.sales.context.reservations

    # Every field of the PATCH body is optional; absent fields keep their
    # current value on the stored availability.
    type OptRestAvailability = Optionalize(RestAvailability)
    without restAv =? OptRestAvailability.fromJson(body), error:
      return RestApiResponse.error(Http400, error.msg)

    without availability =? (await reservations.get(keyId, Availability)), error:
      if error of NotExistsError:
        return RestApiResponse.error(Http404, "Availability not found")
      return RestApiResponse.error(Http500, error.msg)

    # freeSize is derived state maintained by the reservations module; it
    # cannot be set directly by clients.
    if isSome restAv.freeSize:
      return RestApiResponse.error(Http422, "Updating freeSize is not allowed")

    if size =? restAv.totalSize:
      if size == 0:
        return RestApiResponse.error(Http422, "Total size must be larger than zero")

      # we don't allow lowering the totalSize below currently utilized size
      if size < (availability.totalSize - availability.freeSize):
        return RestApiResponse.error(
          Http422,
          "New totalSize must be larger than current totalSize - freeSize, which is currently: " &
            $(availability.totalSize - availability.freeSize),
        )

      if not reservations.hasAvailable(size):
        return RestApiResponse.error(Http422, "Not enough storage quota")

      # Keep freeSize consistent with the change in totalSize.
      availability.freeSize += size - availability.totalSize
      availability.totalSize = size

    if duration =? restAv.duration:
      availability.duration = duration

    if minPricePerBytePerSecond =? restAv.minPricePerBytePerSecond:
      availability.minPricePerBytePerSecond = minPricePerBytePerSecond

    if totalCollateral =? restAv.totalCollateral:
      availability.totalCollateral = totalCollateral

    if until =? restAv.until:
      availability.until = until

    if enabled =? restAv.enabled:
      availability.enabled = enabled

    # Persist; cancellation propagates, validation errors map to 422.
    if err =? (await reservations.update(availability)).errorOption:
      if err of CancelledError:
        raise err
      if err of UntilOutOfBoundsError:
        return RestApiResponse.error(Http422, err.msg)
      else:
        return RestApiResponse.error(Http500, err.msg)

    return RestApiResponse.response(Http204)
  except CatchableError as exc:
    trace "Excepting processing request", exc = exc.msg
    return RestApiResponse.error(Http500)
router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
  id: AvailabilityId
) -> RestApiResponse:
  ## Gets Availability's reservations.
  var headers = buildCorsHeaders("GET", allowedOrigin)
  try:
    without contracts =? node.contracts.host:
      return RestApiResponse.error(
        Http503, "Persistence is not enabled", headers = headers
      )
    # Parse the path parameter, then derive its datastore key.
    without id =? id.tryGet.catch, error:
      return RestApiResponse.error(Http400, error.msg, headers = headers)
    without keyId =? id.key.tryGet.catch, error:
      return RestApiResponse.error(Http400, error.msg, headers = headers)
    let reservations = contracts.sales.context.reservations
    # NOTE(review): `market` appears unused in this handler — confirm before
    # removing (it may be kept for the TODO expansion below).
    let market = contracts.sales.context.market
    # Probe the availability first so an unknown id maps to 404 rather than
    # an empty reservation list.
    if error =? (await reservations.get(keyId, Availability)).errorOption:
      if error of NotExistsError:
        return
          RestApiResponse.error(Http404, "Availability not found", headers = headers)
      else:
        return RestApiResponse.error(Http500, error.msg, headers = headers)
    without availabilitysReservations =? (await reservations.all(Reservation, id)),
      err:
      return RestApiResponse.error(Http500, err.msg, headers = headers)
    # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
    return RestApiResponse.response(
      availabilitysReservations.toJson,
      contentType = "application/json",
      headers = headers,
    )
  except CatchableError as exc:
    trace "Excepting processing request", exc = exc.msg
    return RestApiResponse.error(Http500, headers = headers)
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
  ## Registers the purchasing (client-side storage request) REST routes.
  let allowedOrigin = router.allowedOrigin

  router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
    cid: Cid
  ) -> RestApiResponse:
    var headers = buildCorsHeaders("POST", allowedOrigin)
    ## Create a request for storage
    ##
    ## cid - the cid of a previously uploaded dataset
    ## duration - the duration of the request in seconds
    ## proofProbability - how often storage proofs are required
    ## pricePerBytePerSecond - the amount of tokens paid per byte per second to hosts the client is willing to pay
    ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
    ## nodes - number of nodes the content should be stored on
    ## tolerance - allowed number of nodes that can be lost before content is lost
    ## collateralPerByte - requested collateral per byte from hosts when they fill slot
    try:
      without contracts =? node.contracts.client:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )
      without cid =? cid.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      let body = await request.getBody()

      without params =? StorageRequestParams.fromJson(body), error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      # Validate the request economics before touching the market.
      let expiry = params.expiry
      if expiry <= 0 or expiry >= params.duration:
        return RestApiResponse.error(
          Http422,
          "Expiry must be greater than zero and less than the request's duration",
          headers = headers,
        )

      if params.proofProbability <= 0:
        return RestApiResponse.error(
          Http422, "Proof probability must be greater than zero", headers = headers
        )

      if params.collateralPerByte <= 0:
        return RestApiResponse.error(
          Http422, "Collateral per byte must be greater than zero", headers = headers
        )

      if params.pricePerBytePerSecond <= 0:
        return RestApiResponse.error(
          Http422,
          "Price per byte per second must be greater than zero",
          headers = headers,
        )

      let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
      if params.duration > requestDurationLimit:
        return RestApiResponse.error(
          Http422,
          "Duration exceeds limit of " & $requestDurationLimit & " seconds",
          headers = headers,
        )

      # Erasure-coding parameters; defaults are 3 nodes with tolerance 1.
      let nodes = params.nodes |? 3
      let tolerance = params.tolerance |? 1

      if tolerance == 0:
        return RestApiResponse.error(
          Http422, "Tolerance needs to be bigger than zero", headers = headers
        )

      # prevent underflow
      if tolerance > nodes:
        return RestApiResponse.error(
          Http422,
          "Invalid parameters: `tolerance` cannot be greater than `nodes`",
          headers = headers,
        )

      let ecK = nodes - tolerance
      let ecM = tolerance # for readability

      # ensure leopard constraints: K > 1 and K >= M
      if ecK <= 1 or ecK < ecM:
        return RestApiResponse.error(
          Http422,
          "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`",
          headers = headers,
        )

      without purchaseId =?
        await node.requestStorage(
          cid, params.duration, params.proofProbability, nodes, tolerance,
          params.pricePerBytePerSecond, params.collateralPerByte, expiry,
        ), error:
        if error of InsufficientBlocksError:
          return RestApiResponse.error(
            Http422,
            "Dataset too small for erasure parameters, need at least " &
              $(ref InsufficientBlocksError)(error).minSize.int & " bytes",
            headers = headers,
          )
        return RestApiResponse.error(Http500, error.msg, headers = headers)

      return RestApiResponse.response(purchaseId.toHex)
    except CatchableError as exc:
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

  router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
    id: PurchaseId
  ) -> RestApiResponse:
    ## Returns the state of a single purchase as JSON.
    var headers = buildCorsHeaders("GET", allowedOrigin)
    try:
      without contracts =? node.contracts.client:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )
      without id =? id.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)
      without purchase =? contracts.purchasing.getPurchase(id):
        return RestApiResponse.error(Http404, headers = headers)

      let json =
        %RestPurchase(
          state: purchase.state |? "none",
          error: purchase.error .? msg,
          request: purchase.request,
          requestId: purchase.requestId,
        )

      return RestApiResponse.response(
        $json, contentType = "application/json", headers = headers
      )
    except CatchableError as exc:
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

  router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse:
    ## Lists the ids of all known purchases as a JSON array.
    var headers = buildCorsHeaders("GET", allowedOrigin)
    try:
      without contracts =? node.contracts.client:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      let purchaseIds = contracts.purchasing.getPurchaseIds()
      return RestApiResponse.response(
        $ %purchaseIds, contentType = "application/json", headers = headers
      )
    except CatchableError as exc:
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)
proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin let allowedOrigin = router.allowedOrigin
## various node management api's ## various node management api's
## ##
router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse:
## Returns node SPR in requested format, json or text. ## Returns node SPR in requested format, json or text.
## ##
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -856,7 +412,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse:
## Returns node's peerId in requested format, json or text. ## Returns node's peerId in requested format, json or text.
## ##
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -875,7 +431,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do( router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do(
peerId: PeerId, addrs: seq[MultiAddress] peerId: PeerId, addrs: seq[MultiAddress]
) -> RestApiResponse: ) -> RestApiResponse:
## Connect to a peer ## Connect to a peer
@ -913,7 +469,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse:
## Print rudimentary node information ## Print rudimentary node information
## ##
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -933,11 +489,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
"", "",
"announceAddresses": node.discovery.announceAddrs, "announceAddresses": node.discovery.announceAddrs,
"table": table, "table": table,
"codex": { "storage": {"version": $codexVersion, "revision": $codexRevision},
"version": $codexVersion,
"revision": $codexRevision,
"contracts": $codexContractsRevision,
},
} }
# return pretty json for human readability # return pretty json for human readability
@ -948,7 +500,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do( router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do(
level: Option[string] level: Option[string]
) -> RestApiResponse: ) -> RestApiResponse:
## Set log level at run time ## Set log level at run time
@ -974,8 +526,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
when codex_enable_api_debug_peers: when storage_enable_api_debug_peers:
router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do( router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do(
peerId: PeerId peerId: PeerId
) -> RestApiResponse: ) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -1003,8 +555,6 @@ proc initRestApi*(
var router = RestRouter.init(validate, corsAllowedOrigin) var router = RestRouter.init(validate, corsAllowedOrigin)
initDataApi(node, repoStore, router) initDataApi(node, repoStore, router)
initSalesApi(node, router)
initPurchasingApi(node, router)
initNodeApi(node, conf, router) initNodeApi(node, conf, router)
initDebugApi(node, conf, router) initDebugApi(node, conf, router)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -17,8 +17,6 @@ import pkg/stew/byteutils
import pkg/results import pkg/results
import pkg/stint import pkg/stint
import ../sales
import ../purchasing
import ../utils/stintutils import ../utils/stintutils
proc encodeString*(cid: type Cid): Result[string, cstring] = proc encodeString*(cid: type Cid): Result[string, cstring] =
@ -82,11 +80,6 @@ proc decodeString*(
except ValueError as e: except ValueError as e:
err e.msg.cstring err e.msg.cstring
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
_: type T, value: string
): Result[T, cstring] =
array[32, byte].decodeString(value).map(id => T(id))
proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] = proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
ok(value) ok(value)

Some files were not shown because too many files have changed in this diff Show More