Compare commits

...

84 Commits

Author SHA1 Message Date
Giuliano Mega
4b82d7f82d
fix: Remove unused (and incorrect) verify call from Merkle tree (#1416) 2026-04-30 21:21:47 +00:00
Chrysostomos Nanakos
76c4b23c62
fix: re-seed DHT routing table when empty (#1428)
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
2026-04-29 17:00:32 +00:00
Chrysostomos Nanakos
bb6ab1befa
chore: Block exchange protocol rewrite (#1411)
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
2026-04-25 00:37:42 +00:00
Arnaud
2dd97631ed
chore: update nim 2.2.8 (#1424)
Signed-off-by: Arnaud <arno.deville@gmail.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2026-04-22 13:57:14 +00:00
Arnaud
4c9ecd0766
feat: add a warning when private address only (#1423)
Signed-off-by: Arnaud <arno.deville@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2026-04-21 11:36:02 +00:00
Arnaud
8dd2356000
chore: update chronicles (#1422) 2026-04-21 06:05:00 +00:00
Arnaud
79d59dc66c
fix: cleanup chronicles writer in close function (#1413)
Signed-off-by: Arnaud <arno.deville@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2026-04-13 08:40:14 +00:00
Arnaud
ffc86c23d7
fix: nph build (#1419) 2026-04-13 08:20:00 +00:00
Arnaud
e8bc5c46e9
chore: cleanup dependencies (#1415)
Signed-off-by: Arnaud <arno.deville@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2026-04-13 06:07:38 +00:00
Eric
4ea8350612
fix(repostore): prevent underflow when deleting datasets (#1412) 2026-03-31 05:25:08 +00:00
markoburcul
5ecd82eeaa
mit-license: add copyright owner Logos
Signed-off-by: markoburcul <marko@status.im>
2026-03-24 18:19:06 +01:00
Eric
3bc6aa8b8d
feat(cli)!: change listen addr to listen port (#1409) 2026-02-24 10:03:48 +00:00
Eric
ab413bdfcf
chore!: Finish renaming Codex to Logos Storage (#1399) 2026-02-19 04:59:15 +00:00
Eric
4068bcb2ed
fix: bump nph and refactor build process in makefile (#1410) 2026-02-19 02:12:45 +00:00
Eric
fef46aee35
feat(testing): local libstorage c bindings test (#1407) 2026-02-18 04:29:59 +00:00
Jakub
e375223500
fix: nix /tmp permission errors on MacOS
Because on macOS Nix does not have proper filesystem namespacing like on
Linux, the /tmp filesystem is the real /tmp on the machine.

This can cause permission issues when different Nix build users create it:

> cannot create directory: /tmp/nim/koch_d

Signed-off-by: Jakub Sokołowski <jakub@status.im>
2026-02-13 12:28:50 +01:00
Eric
d02f44ffd6
feat(libstorage): enable rest api in libstorage in debug builds (#1406) 2026-02-11 08:25:53 +00:00
Giuliano Mega
3830ecc9e6
chore: bump nim-datastore (#1405) 2026-02-09 13:59:25 -03:00
Giuliano Mega
61791536e8
fix: add callback call to storage_destroy to keep consistency (#1401) 2026-02-09 14:03:55 +00:00
Giuliano Mega
a5ce5e1fb6
chore: bump nim-datastore (#1402) 2026-02-06 17:10:20 -03:00
Giuliano Mega
1a7cf2d335
fix: propagate callback cancellation (#1400) 2026-02-05 14:33:23 +00:00
Arnaud
3203dfba41
chore: remove circom compat from nix (#1388) 2026-02-03 06:51:30 +00:00
Eric
0a6c2a5796
fix(ci): release process producing wrong artifacts (#1398) 2026-02-03 02:47:46 +00:00
Eric
44ad291b16
refactor!: move merkletree to own repo (#1390) 2026-02-02 22:43:14 +00:00
Adam Uhlíř
c6345fd6f7
build(docker): ignore ipv6 for NAT_IP_AUTO (#1393) 2026-01-30 08:41:28 +00:00
Giuliano Mega
9ab8e1d671
fix: don't release unless there's a tag (#1394) 2026-01-29 16:09:37 +00:00
Giuliano Mega
7c3894da54
fix: actually put the port mapper thread to sleep (#1392) 2026-01-29 13:17:27 +00:00
Eric
52d27485cd
fix(ci): Add version and tagged release variables to release workflow (#1391) 2026-01-28 06:09:17 +00:00
Eric
1c970e9ff6
chore(ci): Rename release artifacts (#1389) 2026-01-27 23:47:13 +00:00
Adam Uhlíř
e596de78c2
build(docker): rename codex to logos-storage (#1387) 2026-01-27 17:32:02 +00:00
Arnaud
7d51740f91
fix: correct zip name for libstorage zip (#1386) 2026-01-27 07:11:01 +00:00
Adam Uhlíř
4fecccc539
ci: don't run empty contract and tools tests (#1385) 2026-01-26 16:44:16 +00:00
Adam Uhlíř
f8d3e0d3be
ci: build images for dist-tests (#1384) 2026-01-22 15:16:48 +00:00
Arnaud
ec5826ecb0
feat: include artifacts deployment in release process (#1383) 2026-01-22 11:49:58 +00:00
Arnaud
3d49e9f2fa
fix: artifacts CI (#1380) 2026-01-22 10:41:27 +00:00
Giuliano Mega
3c09f008bb
fix: downsizing fixes to release workflow and docker entrypoint (#1376)
Co-authored-by: Arnaud <arnaud@status.im>
2026-01-21 17:28:49 +00:00
gmega
a1631569d1
Revert "fix: remove cirdl from build.nims, remove marketplace from docker entrypoint"
This reverts commit 14e8fa749c8abce498e20ddea9ab5e0d371dbc51.
2026-01-16 16:50:47 -03:00
gmega
14e8fa749c
fix: remove cirdl from build.nims, remove marketplace from docker entrypoint 2026-01-16 16:48:46 -03:00
gmega
fdad2200f4
fix: remove cirdl from build; stop installing Rust; update Dockerfile; delete prover benchmarks 2026-01-16 16:42:57 -03:00
Eric
2a1a548341
refactor!: remove unused modules (#1362)
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
2026-01-16 19:03:54 +00:00
Giuliano Mega
e894fb03fa
feat: primitives for sampling with and without replacement (#1125) 2026-01-16 13:49:04 -03:00
Arnaud
da70ebff7c
chore: improve c bindings (#1361)
Signed-off-by: Arnaud <arnaud@status.im>
2026-01-16 14:33:34 +00:00
Arnaud
2073090ed7
fix: close the discovery store (#1364) 2026-01-16 13:10:30 +00:00
Eric
1acedcf71c
fix(ci): introduce a number of integration test fixes (#1342)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Arnaud <arnaud@status.im>
Co-authored-by: gmega <giuliano.mega@gmail.com>
2026-01-16 10:47:59 +00:00
Jacek Sieka
cce002fcbf
feat: async tree building v2 (#1360)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: munna0908 <munnasitu0908@gmail.com>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2026-01-15 18:13:14 +00:00
Jakub
50bd183984
chore(ci): use container, add timeout, tmp cleanup (#1369)
Signed-off-by: Jakub Sokołowski <jakub@status.im>
2026-01-13 17:42:52 +00:00
Arnaud
e59e98ba92
chore: update nix config (#1363) 2026-01-12 12:23:32 +00:00
Arnaud
60861d6af8
chore: rename codex to logos storage (#1359) 2025-12-18 17:23:09 +00:00
Eric
49e801803f
ci: remove dist tests and devnet deployment (#1338) 2025-12-17 06:03:59 +00:00
Jacek Sieka
858101c74c
chore: bump eth & networking (#1353) 2025-12-15 10:00:51 +00:00
Jacek Sieka
bd49591fff
chore: bump *-serialization (#1352) 2025-12-12 08:03:56 +00:00
Jacek Sieka
6765beee2c
chore: assorted bumps (#1351) 2025-12-11 21:03:36 +00:00
Jacek Sieka
45fec4b524
chore: bump libbacktrace (#1349) 2025-12-11 20:42:53 +00:00
Jacek Sieka
9ac9f6ff3c
chore: drop usage of upraises (#1348) 2025-12-11 09:03:43 +00:00
Arnaud
bd36032251
feat: add c binding (#1322)
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 07:34:09 +00:00
Chrysostomos Nanakos
be759baf4d
feat: Block exchange optimizations (#1325)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 05:47:02 +00:00
Eric
6147a751f1
fix(ci): Remove macos amd release build (#1337) 2025-11-13 05:37:43 +00:00
Eric
ee47ca8760
feat(libs): Use libp2p multiformats extensions instead of a rolling branch (#1329) 2025-11-13 04:48:33 +00:00
Eric
f791a960f2
fix(ci): Windows SIGILL in CI (#1339) 2025-11-03 11:45:02 +00:00
Arnaud
db8f866db4
feat: check if CID exists in local store (#1331) 2025-11-02 04:32:47 +00:00
Eric
7aca2f0e61
fix(ci): Move conventional commits job to workflow (#1340) 2025-11-02 04:00:55 +00:00
Eric
072bff5cab
fix: ci integration tests (#1335) 2025-10-30 19:38:11 +11:00
Arnaud
af55a761e6
chore: skip marketplace and long integration tests (#1326) 2025-10-22 19:22:33 +11:00
Adam Uhlíř
e3d8d195c3
chore: update nim-libp2p (#1323) 2025-10-01 13:19:15 +02:00
Slava
d1f2e2399b
ci: validate pr title to adhere conventional commits (#1254) 2025-08-12 08:51:41 +00:00
Slava
8cd10edb69
ci: auto deploy codex on devnet (#1302) 2025-07-28 10:02:19 +00:00
Slava
6cf99e255c
ci: release master builds and upload them to the cloud (#1298) 2025-07-10 11:17:11 +00:00
Dmitriy Ryajov
7eb2fb12cc
make default dirs runtime, not compile time. (#1292) 2025-06-26 18:44:24 +00:00
Slava
352273ff81
chore: bump codex-contracts-eth (#1293) 2025-06-26 18:09:48 +00:00
Slava
9ef9258720
chore(ci): bump node to v22 (#1285) 2025-06-26 01:11:00 +00:00
markspanbroek
7927afe715
chore: update nph dependency (#1279)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-25 10:30:48 +00:00
markspanbroek
01615354af
refactor(ci): run integration tests in parallel by spinning up more runners (#1287) 2025-06-25 08:56:16 +00:00
Chrysostomos Nanakos
baff902137
fix: resolve shared block request cancellation conflicts (#1284) 2025-06-24 15:05:25 +00:00
markspanbroek
4d44154a40
fix(ci): remove "update" to gcc-14 on windows (#1288) 2025-06-24 09:00:56 +00:00
markspanbroek
e1c397e112
fix(tests): auto import all tests files and fix forgotten tests (#1281) 2025-06-23 11:18:59 +00:00
Arnaud
7b660e3554
chore(marketplace): use hardhat ignition (#1195) 2025-06-20 15:55:00 +00:00
Arnaud
c5e424ff1b
feat(marketplace) - add status l2 (Linea) network (#1160) 2025-06-20 12:30:40 +00:00
Slava
36f64ad3e6
chore: update testnet marketplace address (#1283) 2025-06-20 06:13:58 +00:00
Ben Bierens
235c0ec842
chore: updates codex-contracts-eth submodule (#1278)
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2025-06-19 10:31:52 +00:00
Arnaud
d443df441d
chore: improve marketplace integration tests (#1268) 2025-06-19 06:36:10 +00:00
Arnaud
e35aec7870
chore: increase gas limits (#1272) 2025-06-18 12:18:56 +00:00
Slava
93e4e0f177
ci(docker): add stable tag for dist-tests images (#1273) 2025-06-16 16:22:09 +00:00
Slava
6db6bf5f72
feat(docker): adjust entrypoint (#1271)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-14 04:25:29 +00:00
Chrysostomos Nanakos
b305e00160
Add support for slot reconstruction on unavailable slot detection (#1235)
Co-authored-by: Arnaud <arnaud@status.im>
2025-06-12 22:19:42 +00:00
490 changed files with 19610 additions and 30628 deletions

View File

@@ -12,9 +12,6 @@ inputs:
   nim_version:
     description: "Nim version"
     default: "v2.0.14"
-  rust_version:
-    description: "Rust version"
-    default: "1.79.0"
   shell:
     description: "Shell to run commands in"
     default: "bash --noprofile --norc -e -o pipefail"
@@ -24,12 +21,6 @@ inputs:
 runs:
   using: "composite"
   steps:
-    - name: Rust (Linux)
-      if: inputs.os == 'linux'
-      shell: ${{ inputs.shell }} {0}
-      run: |
-        curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=${{ inputs.rust_version }} -y
-
     - name: APT (Linux amd64/arm64)
       if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
       shell: ${{ inputs.shell }} {0}
@@ -65,7 +56,6 @@ runs:
           mingw-w64-ucrt-x86_64-toolchain
           mingw-w64-ucrt-x86_64-cmake
           mingw-w64-ucrt-x86_64-ntldd-git
-          mingw-w64-ucrt-x86_64-rust

     - name: MSYS2 (Windows i386)
       if: inputs.os == 'windows' && inputs.cpu == 'i386'
@@ -79,13 +69,6 @@ runs:
           mingw-w64-i686-toolchain
           mingw-w64-i686-cmake
           mingw-w64-i686-ntldd-git
-          mingw-w64-i686-rust
-
-    - name: MSYS2 (Windows All) - Update to gcc 14
-      if: inputs.os == 'windows'
-      shell: ${{ inputs.shell }} {0}
-      run: |
-        pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst

     - name: Install gcc 14 on Linux
       # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
@@ -107,15 +90,22 @@ runs:
       if: inputs.os == 'linux' || inputs.os == 'macos'
       uses: hendrikmuhs/ccache-action@v1.2
       with:
-        create-symlink: true
-        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
+        create-symlink: false
+        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
         evict-old-files: 7d

+    - name: Add ccache to path on Linux/Mac
+      if: inputs.os == 'linux' || inputs.os == 'macos'
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        echo "/usr/lib/ccache:/usr/local/opt/ccache/libexec" >> "$GITHUB_PATH"
+        echo "/usr/local/opt/ccache/libexec" >> "$GITHUB_PATH"
+
     - name: Install ccache on Windows
       if: inputs.os == 'windows'
       uses: hendrikmuhs/ccache-action@v1.2
       with:
-        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
+        key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
         evict-old-files: 7d

     - name: Enable ccache on Windows
@@ -123,11 +113,11 @@ runs:
       shell: ${{ inputs.shell }} {0}
       run: |
         CCACHE_DIR=$(dirname $(which ccache))/ccached
-        mkdir ${CCACHE_DIR}
-        ln -s $(which ccache) ${CCACHE_DIR}/gcc.exe
-        ln -s $(which ccache) ${CCACHE_DIR}/g++.exe
-        ln -s $(which ccache) ${CCACHE_DIR}/cc.exe
-        ln -s $(which ccache) ${CCACHE_DIR}/c++.exe
+        mkdir -p ${CCACHE_DIR}
+        ln -sf $(which ccache) ${CCACHE_DIR}/gcc.exe
+        ln -sf $(which ccache) ${CCACHE_DIR}/g++.exe
+        ln -sf $(which ccache) ${CCACHE_DIR}/cc.exe
+        ln -sf $(which ccache) ${CCACHE_DIR}/c++.exe
         echo "export PATH=${CCACHE_DIR}:\$PATH" >> $HOME/.bash_profile # prefix path in MSYS2

     - name: Derive environment variables
@@ -211,7 +201,7 @@ runs:
       if: ${{ inputs.coverage != 'true' }}
       with:
         path: NimBinaries
-        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
+        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
         restore-keys: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}

     - name: Set NIM_COMMIT
@@ -224,7 +214,7 @@ runs:
       run: |
         git config --global core.symlinks false

-    - name: Build Nim and Codex dependencies
+    - name: Build Nim and Logos Storage dependencies
       shell: ${{ inputs.shell }} {0}
       run: |
         which gcc

View File

@@ -3,12 +3,14 @@ Tips for shorter build times

 ### Runner availability ###

-Currently, the biggest bottleneck when optimizing workflows is the availability
-of Windows and macOS runners. Therefore, anything that reduces the time spent in
-Windows or macOS jobs will have a positive impact on the time waiting for
-runners to become available. The usage limits for Github Actions are [described
-here][limits]. You can see a breakdown of runner usage for your jobs in the
-Github Actions tab ([example][usage]).
+When running on the Github free, pro or team plan, the bottleneck when
+optimizing workflows is the availability of macOS runners. Therefore, anything
+that reduces the time spent in macOS jobs will have a positive impact on the
+time waiting for runners to become available. On the Github enterprise plan,
+this is not the case and you can more freely use parallelization on multiple
+runners. The usage limits for Github Actions are [described here][limits]. You
+can see a breakdown of runner usage for your jobs in the Github Actions tab
+([example][usage]).

 ### Windows is slow ###
@@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.

 Breaking up a long build job into several jobs that you run in parallel can have
 a positive impact on the wall clock time that a workflow runs. For instance, you
-might consider running unit tests and integration tests in parallel. Keep in
-mind however that availability of macOS and Windows runners is the biggest
-bottleneck. If you split a Windows job into two jobs, you now need to wait for
-two Windows runners to become available! Therefore parallelization often only
-makes sense for Linux jobs.
+might consider running unit tests and integration tests in parallel. When
+running on the Github free, pro or team plan, keep in mind that availability of
+macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
+need to wait for two macOS runners to become available.

 ### Refactoring ###
@@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
 to know whether you introduced a failure on all platforms, or only on a single
 one. You might be tempted to disable fail-fast, but keep in mind that this keeps
 runners busy for longer on a workflow that you know is going to fail anyway.
-Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
+Consequent runs will therefore take longer to start. Fail fast is most likely
+better for overall development speed.

-[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
+[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
 [composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
 [reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
 [cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache
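Editor's aside on the fail-fast advice in the document above: fail-fast is GitHub's default for matrix jobs, so keeping it requires no extra configuration. A minimal sketch, with a hypothetical job and matrix values not taken from this repository's workflows:

    jobs:
      test:
        strategy:
          fail-fast: true  # the default; cancels remaining matrix jobs once one fails
          matrix:
            os: [ubuntu-latest, macos-14]
        runs-on: ${{ matrix.os }}
        steps:
          - run: make test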

View File

@@ -24,9 +24,9 @@ jobs:
       run:
         shell: ${{ matrix.shell }} {0}
-    name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}
+    name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
     runs-on: ${{ matrix.builder }}
-    timeout-minutes: 120
+    timeout-minutes: 90
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
@@ -48,42 +48,25 @@ jobs:
         if: matrix.tests == 'unittest' || matrix.tests == 'all'
         run: make -j${ncpu} test

-      - name: Setup Node.js
-        uses: actions/setup-node@v4
-        with:
-          node-version: 20
-
-      - name: Start Ethereum node with Codex contracts
-        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
-        working-directory: vendor/codex-contracts-eth
-        env:
-          MSYS2_PATH_TYPE: inherit
-        run: |
-          npm install
-          npm start &
-
       ## Part 2 Tests ##
-      - name: Contract tests
-        if: matrix.tests == 'contract' || matrix.tests == 'all'
-        run: make -j${ncpu} testContracts
-
-      ## Part 3 Tests ##
       - name: Integration tests
         if: matrix.tests == 'integration' || matrix.tests == 'all'
-        run: make -j${ncpu} testIntegration
+        env:
+          STORAGE_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
+        run: make -j${ncpu} DEBUG=${{ runner.debug }} testIntegration

       - name: Upload integration tests log files
         uses: actions/upload-artifact@v4
         if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
         with:
-          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
+          name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
           path: tests/integration/logs/
           retention-days: 1

-      ## Part 4 Tools ##
-      - name: Tools tests
-        if: matrix.tests == 'tools' || matrix.tests == 'all'
-        run: make -j${ncpu} testTools
+      ## Part 3 Tests ##
+      - name: Libstorage tests
+        if: matrix.tests == 'libstorage' || matrix.tests == 'all'
+        run: make -j${ncpu} testLibstorage

   status:
     if: always()

View File

@@ -9,36 +9,29 @@ on:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: v2.2.4
+  nim_version: v2.2.8

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
   cancel-in-progress: true

 jobs:
   matrix:
+    name: Compute matrix
     runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
       - name: Compute matrix
         id: matrix
-        uses: fabiocaccamo/create-matrix-action@v5
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {arm64}, builder {macos-14}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {arm64}, builder {macos-14}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {arm64}, builder {macos-14}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {arm64}, builder {macos-14}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
-            os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
+        run: |
+          echo 'matrix<<EOF' >> $GITHUB_OUTPUT
+          tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
+          echo 'EOF' >> $GITHUB_OUTPUT

   build:
     needs: matrix
@@ -55,8 +48,8 @@ jobs:
       - name: Check `nph` formatting
         uses: arnetheduck/nph-action@v1
         with:
-          version: 0.6.1
-          options: "codex/ tests/"
+          version: latest
+          options: "storage/ tests/"
           fail: true
           suggest: true
@@ -83,7 +76,7 @@ jobs:
         shell: bash

       - name: Upload coverage data to Codecov
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
         with:
           directory: ./coverage/
           fail_ci_if_error: true
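A note on the new matrix step above: ci-job-matrix.sh emits a JSON document into the multiline `matrix` output, which the build job (only its `needs: matrix` line is visible in this diff) would then parse with fromJSON. A sketch of that consumption, under the assumption that the script's JSON carries the same keys as the old inline matrix:

      build:
        needs: matrix
        strategy:
          matrix: ${{ fromJSON(needs.matrix.outputs.matrix) }}
        runs-on: ${{ matrix.builder }}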

View File

@@ -0,0 +1,19 @@
+name: Conventional Commits Linting
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+  workflow_dispatch:
+  merge_group:
+
+jobs:
+  pr-title:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request'
+    steps:
+      - name: PR Conventional Commit Validation
+        uses: ytanikin/pr-conventional-commits@1.4.1
+        with:
+          task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'
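Titles accepted by this check follow the Conventional Commits pattern type(scope): subject, with the type drawn from the task_types list above; for example, these commit titles from this comparison would pass:

    feat(cli)!: change listen addr to listen port
    fix: re-seed DHT routing table when empty
    chore(ci): Rename release artifacts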

View File

@@ -14,7 +14,7 @@ on:
       - '!.github/workflows/docker-dist-tests.yml'
       - '!.github/workflows/docker-reusable.yml'
       - 'docker/**'
-      - '!docker/codex.Dockerfile'
+      - '!docker/storage.Dockerfile'
       - '!docker/docker-entrypoint.sh'
   workflow_dispatch:
     inputs:
@@ -26,29 +26,14 @@ on:

 jobs:
-  get-contracts-hash:
-    runs-on: ubuntu-latest
-    outputs:
-      hash: ${{ steps.get-hash.outputs.hash }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: true
-      - name: Get submodule short hash
-        id: get-hash
-        run: |
-          hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
-          echo "hash=$hash" >> $GITHUB_OUTPUT
-
   build-and-push:
     name: Build and Push
     uses: ./.github/workflows/docker-reusable.yml
-    needs: get-contracts-hash
     with:
-      nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
+      nimflags: '-d:disableMarchNative -d:storage_enable_api_debug_peers=true -d:storage_enable_log_counter=true'
       nat_ip_auto: true
       tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
       tag_suffix: dist-tests
-      contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
+      tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
       run_release_tests: ${{ inputs.run_release_tests }}
     secrets: inherit

View File

@@ -5,12 +5,12 @@ on:
   workflow_call:
     inputs:
       docker_file:
-        default: docker/codex.Dockerfile
+        default: docker/storage.Dockerfile
         description: Dockerfile
         required: false
         type: string
       docker_repo:
-        default: codexstorage/nim-codex
+        default: logosstorage/logos-storage-nim
         description: DockerHub repository
         required: false
         type: string
@@ -64,10 +64,10 @@ on:
         required: false
         type: string
         default: false
-      contract_image:
-        description: Specifies compatible smart contract image
-        required: false
-        type: string
+    outputs:
+      logos_storage_image:
+        description: Logos Storage Docker image tag
+        value: ${{ jobs.publish.outputs.logos_storage_image }}

 env:
@@ -81,9 +81,8 @@ env:
   TAG_STABLE: ${{ inputs.tag_stable }}
   TAG_SHA: ${{ inputs.tag_sha }}
   TAG_SUFFIX: ${{ inputs.tag_suffix }}
-  CONTRACT_IMAGE: ${{ inputs.contract_image }}
   # Tests
-  TESTS_SOURCE: codex-storage/cs-codex-dist-tests
+  TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
   TESTS_BRANCH: master
   CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
   CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@@ -91,6 +90,7 @@ env:

 jobs:
+  # Compute variables
   compute:
     name: Compute build ID
     runs-on: ubuntu-latest
@@ -130,19 +130,11 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4

-      - name: Docker - Variables
-        run: |
-          # Create contract label for compatible contract image if specified
-          if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
-            echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
-          fi
-
       - name: Docker - Meta
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.DOCKER_REPO }}
-          labels: ${{ env.CONTRACT_LABEL }}

       - name: Docker - Set up Buildx
         uses: docker/setup-buildx-action@v3
@@ -189,12 +181,12 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.meta.outputs.version }}
+      logos_storage_image: ${{ steps.image_tag.outputs.logos_storage_image }}
     needs: [build, compute]
     steps:
       - name: Docker - Variables
         run: |
-          # Adjust custom suffix when set and
+          # Adjust custom suffix when set
           if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
             echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
           fi
@@ -215,11 +207,6 @@ jobs:
             echo "TAG_RAW=false" >> $GITHUB_ENV
           fi

-          # Create contract label for compatible contract image if specified
-          if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
-            echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
-          fi
-
       - name: Docker - Download digests
         uses: actions/download-artifact@v4
         with:
@@ -235,7 +222,6 @@ jobs:
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.DOCKER_REPO }}
-          labels: ${{ env.CONTRACT_LABEL }}
           flavor: |
             latest=${{ env.TAG_LATEST }}
             suffix=${{ env.TAG_SUFFIX }},onlatest=true
@@ -257,9 +243,12 @@ jobs:
           docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
             $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)

+      - name: Docker - Image tag
+        id: image_tag
+        run: echo "logos_storage_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
+
       - name: Docker - Inspect image
-        run: |
-          docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
+        run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.logos_storage_image }}

   # Compute Tests inputs
@@ -272,13 +261,13 @@ jobs:
       source: ${{ steps.compute.outputs.source }}
       branch: ${{ env.TESTS_BRANCH }}
       workflow_source: ${{ env.TESTS_SOURCE }}
-      codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }}
+      storagedockerimage: ${{ steps.compute.outputs.storagedockerimage }}
     steps:
       - name: Compute Tests inputs
         id: compute
         run: |
           echo "source=${{ format('{0}/{1}', github.server_url, env.TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
-          echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"
+          echo "storagedockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"

   # Compute Continuous Tests inputs
@@ -308,11 +297,11 @@ jobs:
       max-parallel: 1
       matrix:
         tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
-    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
+    uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
     with:
       source: ${{ needs.compute-tests-inputs.outputs.source }}
       branch: ${{ needs.compute-tests-inputs.outputs.branch }}
-      codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
+      storagedockerimage: ${{ needs.compute-tests-inputs.outputs.storagedockerimage }}
       nameprefix: ${{ needs.compute-continuous-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-continuous-tests-inputs.outputs.continuous_tests_duration }}
       tests_filter: ${{ matrix.tests }}
       tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
@@ -325,10 +314,10 @@ jobs:
     name: Run Release Tests
     needs: [compute-tests-inputs]
     if: ${{ inputs.run_release_tests == 'true' }}
-    uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master
+    uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
     with:
       source: ${{ needs.compute-tests-inputs.outputs.source }}
       branch: ${{ needs.compute-tests-inputs.outputs.branch }}
-      codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
+      storagedockerimage: ${{ needs.compute-tests-inputs.outputs.storagedockerimage }}
       workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
     secrets: inherit

View File

@@ -14,31 +14,15 @@ on:
       - '!.github/workflows/docker.yml'
       - '!.github/workflows/docker-reusable.yml'
       - 'docker/**'
-      - '!docker/codex.Dockerfile'
+      - '!docker/storage.Dockerfile'
       - '!docker/docker-entrypoint.sh'
   workflow_dispatch:

 jobs:
-  get-contracts-hash:
-    runs-on: ubuntu-latest
-    outputs:
-      hash: ${{ steps.get-hash.outputs.hash }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: true
-      - name: Get submodule short hash
-        id: get-hash
-        run: |
-          hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
-          echo "hash=$hash" >> $GITHUB_OUTPUT
-
   build-and-push:
     name: Build and Push
     uses: ./.github/workflows/docker-reusable.yml
-    needs: get-contracts-hash
     with:
       tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
       tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
-      contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
     secrets: inherit

View File

@@ -52,7 +52,7 @@ jobs:
           node-version: 18

       - name: Build OpenAPI
-        run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API"
+        run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"

       - name: Build Postman Collection
         run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false

View File

@@ -15,15 +15,14 @@ jobs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
       - name: Compute matrix
         id: matrix
-        uses: fabiocaccamo/create-matrix-action@v5
-        with:
-          matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+        run: |
+          echo 'matrix<<EOF' >> $GITHUB_OUTPUT
+          tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
+          echo 'EOF' >> $GITHUB_OUTPUT

   build:
     needs: matrix

View File

@@ -4,14 +4,15 @@ on:
   push:
     tags:
       - 'v*.*.*'
+    branches:
+      - master
   workflow_dispatch:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned
-  rust_version: 1.79.0
-  codex_binary_base: codex
-  cirdl_binary_base: cirdl
+  storage_binary_base: logos-storage
+  c_bindings_lib_base: libstorage
   build_dir: build
   nim_flags: ''
   windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'
@@ -28,11 +29,10 @@ jobs:
         uses: fabiocaccamo/create-matrix-action@v5
         with:
           matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
+            os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, shell {msys2}

   # Build
   build:
@@ -50,139 +50,178 @@ jobs:
     runs-on: ${{ matrix.builder }}
     timeout-minutes: 80
     steps:
-      - name: Release - Checkout sources
+      - name: Set conditional env variables
+        shell: bash
+        run: |
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "VERSION=${{ github.ref_name }}" >> $GITHUB_ENV
+            echo "TAGGED_RELEASE=true" >> $GITHUB_ENV
+          else
+            echo "VERSION=${GITHUB_SHA::7}" >> $GITHUB_ENV
+            echo "TAGGED_RELEASE=false" >> $GITHUB_ENV
+          fi
+
+      - name: Checkout sources
         uses: actions/checkout@v4
         with:
           submodules: recursive

-      - name: Release - Setup Nimbus Build System
+      - name: Setup Nimbus Build System
         uses: ./.github/actions/nimbus-build-system
         with:
           os: ${{ matrix.os }}
           cpu: ${{ matrix.cpu }}
           shell: ${{ matrix.shell }}
           nim_version: ${{ matrix.nim_version }}
-          rust_version: ${{ matrix.rust_version }}

-      - name: Release - Compute binary name
+      - name: Compute binary name
         run: |
           case ${{ matrix.os }} in
             linux*) os_name="linux" ;;
             macos*) os_name="darwin" ;;
             windows*) os_name="windows" ;;
           esac
-          github_ref_name="${GITHUB_REF_NAME/\//-}"
-          codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
-          cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
+          storage_binary="${{ env.storage_binary_base }}-${os_name}-${{ matrix.cpu }}-${{ env.VERSION }}"
+          c_bindings_lib="${{ env.c_bindings_lib_base }}-${os_name}-${{ matrix.cpu }}-${{ env.VERSION }}"
           if [[ ${os_name} == "windows" ]]; then
-            codex_binary="${codex_binary}.exe"
-            cirdl_binary="${cirdl_binary}.exe"
+            storage_binary="${storage_binary}.exe"
           fi
-          echo "codex_binary=${codex_binary}" >>$GITHUB_ENV
-          echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
+          echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
+          echo "c_bindings_lib=${c_bindings_lib}" >>$GITHUB_ENV

-      - name: Release - Build
+      - name: Build Logos Storage binary
         run: |
-          make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}"
-          make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
+          make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"

-      - name: Release - Libraries
-        run: |
-          if [[ "${{ matrix.os }}" == "windows" ]]; then
-            for lib in ${{ env.windows_libs }}; do
-              cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
-            done
-          fi
+      - name: Package ${{ env.storage_binary_base }} Linux (compress and preserve perms)
+        if: matrix.os == 'linux'
+        run: |
+          sudo apt-get update && sudo apt-get install -y zip
+          zip -j "${{ env.build_dir }}/${{env.storage_binary}}.zip" ./${{ env.build_dir }}/*
+
+      - name: Package ${{ env.storage_binary_base }} MacOS (compress and preserve perms)
+        if: matrix.os == 'macos'
+        run: |
+          zip -j "${{ env.build_dir }}/${{env.storage_binary}}.zip" ./${{ env.build_dir }}/*
+
+      - name: Package ${{ env.storage_binary_base }} Windows (compress and preserve perms)
+        if: matrix.os == 'windows'
+        shell: msys2 {0}
+        run: |
+          7z a -tzip "${{ env.build_dir }}/${{env.storage_binary}}.zip" ./${{ env.build_dir }}/*
+
+      - name: Upload Logos Storage binary to workflow artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ env.storage_binary }}.zip
+          path: ${{ env.build_dir }}/${{ env.storage_binary }}.zip
+          retention-days: 30
+
+      - name: Copy and zip Windows dlls to build/dlls dir (Windows)
+        if: matrix.os == 'windows'
+        run: |
+          mkdir -p "${{ env.build_dir }}/dlls"
+          for lib in ${{ env.windows_libs }}; do
+            cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}/dlls"
+          done
+          7z a -tzip "${{ env.build_dir }}/${{ env.storage_binary }}-dlls.zip" ./${{ env.build_dir }}/dlls/*.dll

-      - name: Release - Upload codex build artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: release-${{ env.codex_binary }}
-          path: ${{ env.build_dir }}/${{ env.codex_binary_base }}*
-          retention-days: 30
-
-      - name: Release - Upload cirdl build artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: release-${{ env.cirdl_binary }}
-          path: ${{ env.build_dir }}/${{ env.cirdl_binary_base }}*
-          retention-days: 30
-
-      - name: Release - Upload windows libs
+      - name: Upload Windows dlls to workflow artifacts
         if: matrix.os == 'windows'
         uses: actions/upload-artifact@v4
         with:
-          name: release-${{ matrix.os }}-libs
-          path: ${{ env.build_dir }}/*.dll
+          name: ${{ env.storage_binary }}-dlls.zip
+          path: ${{ env.build_dir }}/${{ env.storage_binary }}-dlls.zip
           retention-days: 30
+
+      - name: Build ${{ env.c_bindings_lib_base }} (Linux)
+        if: matrix.os == 'linux'
+        run: |
+          make -j${ncpu} update
+          make -j${ncpu} libstorage
+
+      - name: Build ${{ env.c_bindings_lib_base }} (MacOS)
+        if: matrix.os == 'macos'
+        run: |
+          make -j${ncpu} update
+          STORAGE_LIB_PARAMS="--passL:\"-Wl,-install_name,@rpath/${{ env.c_bindings_lib_base }}.dylib\"" make -j${ncpu} libstorage
+
+      - name: Build ${{ env.c_bindings_lib_base }} (Windows)
+        if: matrix.os == 'windows'
+        shell: msys2 {0}
+        run: |
+          make -j${ncpu} update
+          make -j${ncpu} libstorage
+
+      - name: Package ${{ env.c_bindings_lib_base }} Linux
+        if: matrix.os == 'linux'
+        run: |
+          sudo apt-get update && sudo apt-get install -y zip
+          zip -j "${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip" ${{ env.build_dir }}/${{ env.c_bindings_lib_base }}.so
+          zip -j "${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip" library/${{ env.c_bindings_lib_base }}.h
+
+      - name: Package ${{ env.c_bindings_lib_base }} MacOS
+        if: matrix.os == 'macos'
+        run: |
+          zip -j "${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip" ${{ env.build_dir }}/${{ env.c_bindings_lib_base }}.dylib
+          zip -j "${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip" library/${{ env.c_bindings_lib_base }}.h
+
+      - name: Package ${{ env.c_bindings_lib_base }} (Windows)
+        if: matrix.os == 'windows'
+        shell: msys2 {0}
+        run: |
+          7z a -tzip "${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip" ./${{ env.build_dir }}/${{ env.c_bindings_lib_base }}.dll
+          7z a -tzip "${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip" ./library/${{ env.c_bindings_lib_base }}.h
+
+      - name: Upload ${{ env.c_bindings_lib_base }} to workflow artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ env.c_bindings_lib }}.zip
+          path: ${{ env.build_dir }}/${{ env.c_bindings_lib }}.zip
+          if-no-files-found: error

   # Release
   release:
     runs-on: ubuntu-latest
     needs: build
     if: success() || failure()
     steps:
-      - name: Release - Download binaries
+      - name: Set conditional env variables
+        shell: bash
+        run: |
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            echo "VERSION=${{ github.ref_name }}" >> $GITHUB_ENV
+            echo "TAGGED_RELEASE=true" >> $GITHUB_ENV
+          else
+            echo "VERSION=${GITHUB_SHA::7}" >> $GITHUB_ENV
+            echo "TAGGED_RELEASE=false" >> $GITHUB_ENV
+          fi
+
+      - name: Download binaries from workflow artifacts into temp folder
         uses: actions/download-artifact@v4
         with:
-          pattern: release*
+          pattern: ${{ env.storage_binary_base }}*
           merge-multiple: true
           path: /tmp/release

-      - name: Release - Compress and checksum
-        run: |
-          cd /tmp/release
-          checksum() {
-            arc="${1}"
-            sha256sum "${arc}" >"${arc}.sha256"
-          }
-          # Compress and prepare
-          for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do
-            if [[ "${file}" == *".exe"* ]]; then
-              # Windows - binary only
-              arc="${file%.*}.zip"
-              zip "${arc}" "${file}"
-              checksum "${arc}"
-              # Windows - binary and libs
-              arc="${file%.*}-libs.zip"
-              zip "${arc}" "${file}" ${{ env.windows_libs }}
-              rm -f "${file}"
-              checksum "${arc}"
-            else
-              # Linux/macOS
-              arc="${file}.tar.gz"
-              chmod 755 "${file}"
-              tar cfz "${arc}" "${file}"
-              rm -f "${file}"
-              checksum "${arc}"
-            fi
-          done
-          rm -f ${{ env.windows_libs }}
-
-      - name: Release - Upload compressed artifacts and checksums
-        uses: actions/upload-artifact@v4
+      - name: Download ${{ env.c_bindings_lib_base }} from workflow artifacts into temp folder
+        uses: actions/download-artifact@v5
         with:
-          name: archives-and-checksums
-          path: /tmp/release/
-          retention-days: 30
+          pattern: ${{ env.c_bindings_lib_base }}*
+          merge-multiple: true
+          path: /tmp/release

-      - name: Release
+      - name: Create GH release
         uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
+        if: env.TAGGED_RELEASE == 'true'
         with:
           files: |
-            /tmp/release/*
+            /tmp/release/*-*
           make_latest: true

       - name: Generate Python SDK
         uses: peter-evans/repository-dispatch@v3
-        if: startsWith(github.ref, 'refs/tags/')
+        if: env.TAGGED_RELEASE == 'true'
         with:
           token: ${{ secrets.DISPATCH_PAT }}
-          repository: codex-storage/py-codex-api-client
+          repository: logos-storage/logos-storage-py-api-client
           event-type: generate
-          client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}'
+          client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'

.gitignore (vendored, 7 lines changed)

@@ -38,7 +38,7 @@ nimble.paths
 .env
 .update.timestamp
-codex.nims
+storage.nims
 nimbus-build-system.paths
 docker/hostdatadir
 docker/prometheus-data
@@ -47,3 +47,8 @@ nim.cfg
 tests/integration/logs
 data/
+tests/cbindings/data-dir
+tests/cbindings/downloaded_hello.txt
+nimbledeps

.gitmodules (vendored, 63 lines changed)

@@ -33,26 +33,16 @@
 	url = https://github.com/status-im/nim-stew.git
 	ignore = untracked
 	branch = master
-[submodule "vendor/nim-nitro"]
-	path = vendor/nim-nitro
-	url = https://github.com/status-im/nim-nitro.git
-	ignore = untracked
-	branch = master
 [submodule "vendor/questionable"]
 	path = vendor/questionable
 	url = https://github.com/status-im/questionable.git
 	ignore = untracked
-	branch = master
-[submodule "vendor/upraises"]
-	path = vendor/upraises
-	url = https://github.com/markspanbroek/upraises.git
-	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/asynctest"]
 	path = vendor/asynctest
 	url = https://github.com/status-im/asynctest.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-presto"]
 	path = vendor/nim-presto
 	url = https://github.com/status-im/nim-presto.git
@@ -132,19 +122,13 @@
 	path = vendor/nim-websock
 	url = https://github.com/status-im/nim-websock.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-contract-abi"]
 	path = vendor/nim-contract-abi
 	url = https://github.com/status-im/nim-contract-abi
-[submodule "vendor/nim-json-rpc"]
-	path = vendor/nim-json-rpc
-	url = https://github.com/status-im/nim-json-rpc
 [submodule "vendor/nim-zlib"]
 	path = vendor/nim-zlib
 	url = https://github.com/status-im/nim-zlib
-[submodule "vendor/nim-ethers"]
-	path = vendor/nim-ethers
-	url = https://github.com/status-im/nim-ethers
 [submodule "vendor/lrucache.nim"]
 	path = vendor/lrucache.nim
 	url = https://github.com/status-im/lrucache.nim
@@ -160,13 +144,10 @@
 	path = vendor/nim-taskpools
 	url = https://github.com/status-im/nim-taskpools.git
 	ignore = untracked
-	branch = master
-[submodule "vendor/nim-leopard"]
-	path = vendor/nim-leopard
-	url = https://github.com/status-im/nim-leopard.git
-[submodule "vendor/nim-codex-dht"]
-	path = vendor/nim-codex-dht
-	url = https://github.com/codex-storage/nim-codex-dht.git
+	branch = stable
+[submodule "vendor/logos-storage-nim-dht"]
+	path = vendor/logos-storage-nim-dht
+	url = https://github.com/logos-storage/logos-storage-nim-dht.git
 	ignore = untracked
 	branch = master
 [submodule "vendor/nim-datastore"]
@@ -175,12 +156,6 @@
 [submodule "vendor/nim-sqlite3-abi"]
 	path = vendor/nim-sqlite3-abi
 	url = https://github.com/arnetheduck/nim-sqlite3-abi.git
-[submodule "vendor/nim-eth"]
-	path = vendor/nim-eth
-	url = https://github.com/status-im/nim-eth
-[submodule "vendor/codex-contracts-eth"]
-	path = vendor/codex-contracts-eth
-	url = https://github.com/status-im/codex-contracts-eth
 [submodule "vendor/nim-protobuf-serialization"]
 	path = vendor/nim-protobuf-serialization
 	url = https://github.com/status-im/nim-protobuf-serialization
@@ -193,28 +168,15 @@
 [submodule "vendor/npeg"]
 	path = vendor/npeg
 	url = https://github.com/zevv/npeg
-[submodule "vendor/nim-poseidon2"]
-	path = vendor/nim-poseidon2
-	url = https://github.com/codex-storage/nim-poseidon2.git
 [submodule "vendor/constantine"]
 	path = vendor/constantine
 	url = https://github.com/mratsim/constantine.git
-[submodule "vendor/nim-circom-compat"]
-	path = vendor/nim-circom-compat
-	url = https://github.com/codex-storage/nim-circom-compat.git
-	ignore = untracked
-	branch = master
-[submodule "vendor/codex-storage-proofs-circuits"]
-	path = vendor/codex-storage-proofs-circuits
-	url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
-	ignore = untracked
-	branch = master
 [submodule "vendor/nim-serde"]
 	path = vendor/nim-serde
-	url = https://github.com/codex-storage/nim-serde.git
+	url = https://github.com/logos-storage/nim-serde.git
 [submodule "vendor/nim-leveldbstatic"]
 	path = vendor/nim-leveldbstatic
-	url = https://github.com/codex-storage/nim-leveldb.git
+	url = https://github.com/logos-storage/nim-leveldb.git
 [submodule "vendor/nim-zippy"]
 	path = vendor/nim-zippy
 	url = https://github.com/status-im/nim-zippy.git
@@ -225,9 +187,12 @@
 	path = vendor/nim-quic
 	url = https://github.com/vacp2p/nim-quic.git
 	ignore = untracked
-	branch = master
+	branch = main
 [submodule "vendor/nim-ngtcp2"]
 	path = vendor/nim-ngtcp2
 	url = https://github.com/vacp2p/nim-ngtcp2.git
 	ignore = untracked
-	branch = master
+	branch = main
+[submodule "vendor/nim-merkletree"]
+	path = vendor/nim-merkletree
+	url = https://github.com/logos-storage/nim-merkletree

.nph.toml Normal file

@ -0,0 +1,5 @@
# Add to default exclusions (recommended - doesn't lose the defaults)
extend-exclude = [
# "tests/fixtures",
"vendor",
]
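With `vendor` excluded from formatting, the usual workflow (using the repository's Makefile targets shown later in this diff) is:

```bash
make build-nph       # one-time: build the nph formatter
make format          # format *.nim plus the storage/, tests/ and library/ trees
make nph/storage/    # or format a single file or folder
```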


@ -1,5 +1,7 @@
The MIT License (MIT) The MIT License (MIT)
Copyright © 2025-2026 Logos
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights

Makefile

@ -15,7 +15,7 @@
# #
# If NIM_COMMIT is set to "nimbusbuild", this will use the # If NIM_COMMIT is set to "nimbusbuild", this will use the
# version pinned by nimbus-build-system. # version pinned by nimbus-build-system.
PINNED_NIM_VERSION := v2.2.4 PINNED_NIM_VERSION := v2.2.8
ifeq ($(NIM_COMMIT),) ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION) NIM_COMMIT := $(PINNED_NIM_VERSION)
@ -64,6 +64,15 @@ else
endif endif
export CXXFLAGS export CXXFLAGS
LIBSTORAGE_PARAMS :=
# By default, libstorage disables the REST API. To build libstorage with the
# REST API enabled for remote debugging, use `make DEBUG=1 libstorage`
ifeq ($(DEBUG),1)
LIBSTORAGE_PARAMS := $(LIBSTORAGE_PARAMS) -d:LibstorageDisableRestApi=0
else ifeq ($(DEBUG),0)
LIBSTORAGE_PARAMS := $(LIBSTORAGE_PARAMS) -d:LibstorageDisableRestApi=1
endif
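A quick usage sketch of the switch above:

```bash
make libstorage           # library default: REST API disabled
make DEBUG=1 libstorage   # compile the REST API in, for remote debugging
make DEBUG=0 libstorage   # force the REST API off explicitly
```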
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target # we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk -include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
@ -74,6 +83,9 @@ export CXXFLAGS
deps \ deps \
libbacktrace \ libbacktrace \
test \ test \
testAll \
testIntegration \
testLibstorage \
update update
ifeq ($(NIM_PARAMS),) ifeq ($(NIM_PARAMS),)
@ -93,15 +105,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.' # default target, because it's the first one that doesn't start with '.'
# Builds the codex binary # Builds the Logos Storage binary
all: | build deps all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim toolsCirdl $(NIM_PARAMS) build.nims
# must be included after the default target # must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
@ -135,30 +142,28 @@ test: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims
# Builds and runs the smart contract tests
testContracts: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs the integration tests # Builds and runs the integration tests
testIntegration: | build deps testIntegration: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims $(ENV_SCRIPT) nim testIntegration $(TEST_PARAMS) $(NIM_PARAMS) build.nims
# Builds and runs all tests (except for Taiko L2 tests) # Builds a C example that uses the libstorage C library and runs it
testLibstorage: | build deps
$(MAKE) $(if $(ncpu),-j$(ncpu),) libstorage
cd tests/cbindings && \
if [ "$(detected_OS)" = "Windows" ]; then \
gcc -o storage.exe storage.c -L../../build -lstorage -pthread && \
PATH=../../build:$$PATH ./storage.exe; \
else \
gcc -o storage storage.c -L../../build -lstorage -Wl,-rpath,../../ -pthread && \
LD_LIBRARY_PATH=../../build ./storage; \
fi
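Typical invocations of the test targets above; extra compile-time flags for the integration suite can be passed through `TEST_PARAMS` (the define shown is one the build already uses elsewhere):

```bash
make test                                                        # unit tests
make testIntegration TEST_PARAMS="-d:chronicles_log_level=TRACE" # integration tests
make testLibstorage   # builds libstorage, then compiles and runs tests/cbindings/storage.c
```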
# Builds and runs all tests
testAll: | build deps testAll: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims
$(MAKE) $(if $(ncpu),-j$(ncpu),) testLibstorage
# Builds and runs Taiko L2 tests
testTaiko: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) build.nims
# Builds and runs tool tests
testTools: | cirdl
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTools $(NIM_PARAMS) build.nims
# nim-libbacktrace # nim-libbacktrace
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
@ -176,11 +181,11 @@ endif
coverage: coverage:
$(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test $(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test
cd nimcache/release/testCodex && rm -f *.c cd nimcache/release/testStorage && rm -f *.c
mkdir -p coverage mkdir -p coverage
lcov --capture --keep-going --directory nimcache/release/testCodex --output-file coverage/coverage.info lcov --capture --keep-going --directory nimcache/release/testStorage --output-file coverage/coverage.info
shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim shopt -s globstar && ls $$(pwd)/storage/{*,**/*}.nim
shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/storage/{*,**/*}.nim --output-file coverage/coverage.f.info
echo -e $(BUILD_MSG) "coverage/report/index.html" echo -e $(BUILD_MSG) "coverage/report/index.html"
genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report
@ -209,8 +214,10 @@ NPH:=$(shell dirname $(NIM_BINARY))/nph
build-nph: build-nph:
ifeq ("$(wildcard $(NPH))","") ifeq ("$(wildcard $(NPH))","")
$(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \ cd vendor/nph && \
mv vendor/nph/src/nph $(shell dirname $(NPH)) nimble setup -l && \
nimble build -d:disable_libbacktrace && \
mv ./nph ../../$(shell dirname $(NPH)) && \
echo "nph utility is available at " $(NPH) echo "nph utility is available at " $(NPH)
endif endif
@ -230,8 +237,9 @@ nph/%: build-nph
format: format:
$(NPH) *.nim $(NPH) *.nim
$(NPH) codex/ $(NPH) storage/
$(NPH) tests/ $(NPH) tests/
$(NPH) library/
clean-nph: clean-nph:
rm -f $(NPH) rm -f $(NPH)
@ -242,4 +250,32 @@ print-nph-path:
clean: | clean-nph clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) $(LIBSTORAGE_PARAMS) storage.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) $(LIBSTORAGE_PARAMS) storage.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) $(LIBSTORAGE_PARAMS) storage.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) $(LIBSTORAGE_PARAMS) storage.nims
endif
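Extra Nim defines can be appended through `STORAGE_LIB_PARAMS`; the value below is illustrative, not a required flag:

```bash
make libstorage                                                              # dynamic build (default)
make STATIC=1 STORAGE_LIB_PARAMS="-d:chronicles_log_level=INFO" libstorage   # static build
```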
endif # "variables.mk" was not included endif # "variables.mk" was not included


@ -1,27 +1,27 @@
# Codex Decentralized Durability Engine # Logos Storage Filesharing Client
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval. > The Logos Storage project aims to create a filesharing client that allows sharing data privately in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha. > WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability) [![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster) [![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster) [![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex) [![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ) [![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex) ![Docker Pulls](https://img.shields.io/docker/pulls/logosstorage/logos-storage-nim)
## Build and Run ## Build and Run
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
To build the project, clone it and run: To build the project, clone it and run:
```bash ```bash
make update && make make update && make
# Tip: use -j{ncpu} for parallel execution, e.g.:
# make -j12 update && make -j12
``` ```
The executable will be placed under the `build` directory under the project root. The executable will be placed under the `build` directory under the project root.
@ -29,37 +29,67 @@ The executable will be placed under the `build` directory under the project root
Run the client with: Run the client with:
```bash ```bash
build/codex build/storage
``` ```
## Configuration ## Configuration
It is possible to configure a Codex node in several ways: It is possible to configure a Logos Storage node in several ways:
1. CLI options 1. CLI options
2. Environment variables 2. Environment variables
3. Configuration file 3. Configuration file
The order of priority is the same as above: CLI options --> Environment variables --> Configuration file. The order of priority is the same as above: CLI options --> Environment variables --> Configuration file.
Please check [documentation](https://docs.codex.storage/learn/run#configuration) for more information. Please check `build/storage --help` for more information.
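Purely for illustration, here is the same setting supplied all three ways, with the CLI winning. The option and variable names below are placeholders, not the client's real flags; consult `build/storage --help` for the actual names:

```bash
# 1. CLI option (highest priority) -- hypothetical flag name
build/storage --log-level=DEBUG
# 2. Environment variable -- hypothetical variable name
STORAGE_LOG_LEVEL=DEBUG build/storage
# 3. Configuration file -- hypothetical flag and file name
build/storage --config-file=storage.toml
```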
## Guides
To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API ## API
The client exposes a REST API that can be used to interact with the client. An overview of the API can be found on [api.codex.storage](https://api.codex.storage). The client exposes a REST API that can be used to interact with the client. An overview of the API can be found on [api.codex.storage](https://api.codex.storage).
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The C API bindings are located in the `library` folder.
Currently, only Go bindings are provided in this repo. However, Rust bindings for Logos Storage can be found at https://github.com/nipsysdev/storage-rust-bindings.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
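As a quick smoke test that the artifact loads, here is a minimal C sketch assuming a Linux host and the default dynamic build; it opens the library with `dlopen` and calls no Logos Storage functions, so no API names are assumed:

```c
// load_check.c -- verify that build/libstorage.so can be loaded at runtime
#include <dlfcn.h>
#include <stdio.h>

int main(void) {
  void *handle = dlopen("./build/libstorage.so", RTLD_NOW);
  if (handle == NULL) {
    fprintf(stderr, "failed to load: %s\n", dlerror());
    return 1;
  }
  puts("libstorage.so loaded OK");
  dlclose(handle);
  return 0;
}
```

Compile with `gcc -o load_check load_check.c -ldl` and run it from the repository root.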
### Run the Go example
See https://github.com/logos-storage/logos-storage-go-bindings-example.
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so`/`libstorage.dylib`/`libstorage.dll`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
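Linking against the result mirrors what the Makefile's `testLibstorage` target does for the bundled C example; `your_app.c` below is a placeholder for your own code:

```bash
gcc -o storage_app your_app.c -Lbuild -lstorage -pthread
LD_LIBRARY_PATH=build ./storage_app
```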
### Limitation
Callbacks must be fast and non-blocking; otherwise, the worker thread will hang and prevent other requests from being processed.
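A sketch of the intended pattern follows; the callback type and its wiring are hypothetical placeholders rather than the real libstorage API (the actual signatures come from the generated header under `library/`). The point is only that the callback copies its payload and returns immediately, deferring slow work to another thread:

```c
// callback_offload.c -- hypothetical callback shape, NOT the real libstorage API
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

typedef struct {
  char *data;
  size_t len;
} job_t;

// Slow processing happens here, off the callback thread.
static void *worker(void *arg) {
  job_t *job = arg;
  printf("processing %zu bytes off the callback thread\n", job->len);
  free(job->data);
  free(job);
  return NULL;
}

// Hypothetical callback invoked from libstorage's worker thread: copy the
// payload, hand it off to another thread, and return right away.
static void on_event(const char *data, size_t len, void *udata) {
  (void)udata;
  job_t *job = malloc(sizeof *job);
  job->data = malloc(len);
  memcpy(job->data, data, len);
  job->len = len;
  pthread_t t;
  pthread_create(&t, NULL, worker, job); // real code would use a queue or pool
  pthread_detach(t);
}

int main(void) {
  on_event("hello", 5, NULL); // simulate one delivery
  struct timespec ts = {0, 50 * 1000 * 1000};
  nanosleep(&ts, NULL); // demo only: give the detached worker time to finish
  return 0;
}
```

Compile with `gcc -o callback_offload callback_offload.c -pthread`.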
## Contributing and development ## Contributing and development
Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
### Linting and formatting ### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. `logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) to format its code, and contributions are required to adhere to its styling.
If you are setting up a fresh environment, run `make build-nph` to get `nph`. If you are setting up a fresh environment, run `make build-nph` to get `nph`.
To format files, run `make nph/<file or folder you want to format>`. To format files, run `make nph/<file or folder you want to format>`.
If you want, you can install a Git pre-commit hook using `make install-nph-commit`, which will format modified files prior to committing them. If you want, you can install a Git pre-commit hook using `make install-nph-commit`, which will format modified files prior to committing them.


@ -1,2 +0,0 @@
ceremony
circuit_bench_*


@ -1,33 +0,0 @@
## Benchmark Runner
Modify the `runAllBenchmarks` proc in `run_benchmarks.nim` to use the desired parameters and variations.
Then run it:
```sh
nim c -r run_benchmarks
```
By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definition.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit include paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI
Runs Codex's prover setup with Ark / Circom.
Compile:
```sh
nim c codex_ark_prover_cli.nim
```
Run to see usage:
```sh
./codex_ark_prover_cli -h
```


@ -1,15 +0,0 @@
--path:
".."
--path:
"../tests"
--threads:
on
--tlsEmulation:
off
--d:
release
# when not defined(chronicles_log_level):
# --define:"chronicles_log_level:NONE" # compile all log statements
# --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime
# --"import":"logging" # ensure that logging is ignored at runtime


@ -1,187 +0,0 @@
import std/[hashes, json, strutils, strformat, os, osproc, uri]
import ./utils
type
CircuitEnv* = object
nimCircuitCli*: string
circuitDirIncludes*: string
ptauPath*: string
ptauUrl*: Uri
codexProjDir*: string
CircuitArgs* = object
depth*: int
maxslots*: int
cellsize*: int
blocksize*: int
nsamples*: int
entropy*: int
seed*: int
nslots*: int
ncells*: int
index*: int
proc findCodexProjectDir(): string =
## find codex proj dir -- assumes this script is in codex/benchmarks
result = currentSourcePath().parentDir.parentDir
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir()
result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli"
result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
result.codexProjDir = codexDir
proc check*(env: var CircuitEnv) =
## check that the CWD of script is in the codex parent
let codexProjDir = findCodexProjectDir()
echo "\n\nFound project dir: ", codexProjDir
let snarkjs = findExe("snarkjs")
if snarkjs == "":
echo dedent"""
ERROR: must install snarkjs first
npm install -g snarkjs@latest
"""
let circom = findExe("circom")
if circom == "":
echo dedent"""
ERROR: must install circom first
git clone https://github.com/iden3/circom.git
cargo install --path circom
"""
if snarkjs == "" or circom == "":
quit 2
echo "Found SnarkJS: ", snarkjs
echo "Found Circom: ", circom
if not env.nimCircuitCli.fileExists:
echo "Nim Circuit reference cli not found: ", env.nimCircuitCli
echo "Building Circuit reference cli...\n"
withDir env.nimCircuitCli.parentDir:
runit "nimble build -d:release --styleCheck:off cli"
echo "CWD: ", getCurrentDir()
assert env.nimCircuitCli.fileExists()
echo "Found NimCircuitCli: ", env.nimCircuitCli
echo "Found Circuit Path: ", env.circuitDirIncludes
echo "Found PTAU file: ", env.ptauPath
proc downloadPtau*(ptauPath: string, ptauUrl: Uri) =
## download ptau file using curl if needed
if not ptauPath.fileExists:
echo "Ceremony file not found, downloading..."
createDir ptauPath.parentDir
withDir ptauPath.parentDir:
runit fmt"curl --output '{ptauPath}' '{$ptauUrl}/{ptauPath.splitPath().tail}'"
else:
echo "Found PTAU file at: ", ptauPath
proc getCircuitBenchStr*(args: CircuitArgs): string =
for f, v in fieldPairs(args):
result &= "_" & f & $v
proc getCircuitBenchPath*(args: CircuitArgs, env: CircuitEnv): string =
## generate folder name for unique circuit args
result = env.codexProjDir / "benchmarks/circuit_bench" & getCircuitBenchStr(args)
proc generateCircomAndSamples*(args: CircuitArgs, env: CircuitEnv, name: string) =
## run nim circuit and sample generator
var cliCmd = env.nimCircuitCli
for f, v in fieldPairs(args):
cliCmd &= " --" & f & "=" & $v
if not "input.json".fileExists:
echo "Generating Circom Files..."
runit fmt"{cliCmd} -v --circom={name}.circom --output=input.json"
proc createCircuit*(
args: CircuitArgs,
env: CircuitEnv,
name = "proof_main",
circBenchDir = getCircuitBenchPath(args, env),
someEntropy = "some_entropy_75289v3b7rcawcsyiur",
doGenerateWitness = false,
): tuple[dir: string, name: string] =
## Generates all the files needed to run a proof circuit. Downloads the PTAU file if needed.
##
## All needed circuit files will be generated as needed.
## They will be located in `circBenchDir` which defaults to a folder like:
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## with all the given CircuitArgs.
##
let circdir = circBenchDir
downloadPtau env.ptauPath, env.ptauUrl
echo "Creating circuit dir: ", circdir
createDir circdir
withDir circdir:
writeFile("circuit_params.json", pretty(%*args))
let
inputs = circdir / "input.json"
zkey = circdir / fmt"{name}.zkey"
wasm = circdir / fmt"{name}.wasm"
r1cs = circdir / fmt"{name}.r1cs"
wtns = circdir / fmt"{name}.wtns"
generateCircomAndSamples(args, env, name)
if not wasm.fileExists or not r1cs.fileExists:
runit fmt"circom --r1cs --wasm --O2 -l{env.circuitDirIncludes} {name}.circom"
moveFile fmt"{name}_js" / fmt"{name}.wasm", fmt"{name}.wasm"
echo "Found wasm: ", wasm
echo "Found r1cs: ", r1cs
if not zkey.fileExists:
echo "ZKey not found, generating..."
putEnv "NODE_OPTIONS", "--max-old-space-size=8192"
if not fmt"{name}_0000.zkey".fileExists:
runit fmt"snarkjs groth16 setup {r1cs} {env.ptauPath} {name}_0000.zkey"
echo fmt"Generated {name}_0000.zkey"
let cmd =
fmt"snarkjs zkey contribute {name}_0000.zkey {name}_0001.zkey --name='1st Contributor Name'"
echo "CMD: ", cmd
let cmdRes = execCmdEx(cmd, options = {}, input = someEntropy & "\n")
assert cmdRes.exitCode == 0
moveFile fmt"{name}_0001.zkey", fmt"{name}.zkey"
removeFile fmt"{name}_0000.zkey"
if not wtns.fileExists and doGenerateWitness:
runit fmt"node generate_witness.js {wtns} ../input.json ../witness.wtns"
return (circdir, name)
when isMainModule:
echo "findCodexProjectDir: ", findCodexProjectDir()
## test run creating a circuit
var env = CircuitEnv.default()
env.check()
let args = CircuitArgs(
depth: 32, # maximum depth of the slot tree
maxslots: 256, # maximum number of slots
cellsize: 2048, # cell size in bytes
blocksize: 65536, # block size in bytes
nsamples: 5, # number of samples to prove
entropy: 1234567, # external randomness
seed: 12345, # seed for creating fake data
nslots: 11, # number of slots in the dataset
index: 3, # which slot we prove (0..NSLOTS-1)
ncells: 512, # number of cells in this slot
)
let benchenv = createCircuit(args, env)
echo "\nBench dir:\n", benchenv


@ -1,105 +0,0 @@
import std/[sequtils, strformat, os, options, importutils]
import std/[times, os, strutils, terminal]
import pkg/questionable
import pkg/questionable/results
import pkg/datastore
import pkg/codex/[rng, stores, merkletree, codextypes, slots]
import pkg/codex/utils/[json, poseidon2digest]
import pkg/codex/slots/[builder, sampler/utils, backends/helpers]
import pkg/constantine/math/[arithmetic, io/io_bigints, io/io_fields]
import ./utils
import ./create_circuits
type CircuitFiles* = object
r1cs*: string
wasm*: string
zkey*: string
inputs*: string
proc runArkCircom(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
echo "Loading sample proof..."
var
inputData = files.inputs.readFile()
inputJson = !JsonNode.parse(inputData)
proofInputs = Poseidon2Hash.jsonToProofInput(inputJson)
circom = CircomCompat.init(
files.r1cs,
files.wasm,
files.zkey,
slotDepth = args.depth,
numSamples = args.nsamples,
)
defer:
circom.release() # this comes from the rust FFI
echo "Sample proof loaded..."
echo "Proving..."
let nameArgs = getCircuitBenchStr(args)
var proof: CircomProof
benchmark fmt"prover-{nameArgs}", benchmarkLoops:
proof = circom.prove(proofInputs).tryGet
var verRes: bool
benchmark fmt"verify-{nameArgs}", benchmarkLoops:
verRes = circom.verify(proof, proofInputs).tryGet
echo "verify result: ", verRes
proc runRapidSnark(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
# time rapidsnark ${CIRCUIT_MAIN}.zkey witness.wtns proof.json public.json
echo "generating the witness..."
## TODO
proc runBenchmark(args: CircuitArgs, env: CircuitEnv, benchmarkLoops: int) =
## execute benchmarks given a set of args
## will create a folder in `benchmarks/circuit_bench_$(args)`
##
let env = createCircuit(args, env)
## TODO: copy over testcircomcompat proving
let files = CircuitFiles(
r1cs: env.dir / fmt"{env.name}.r1cs",
wasm: env.dir / fmt"{env.name}.wasm",
zkey: env.dir / fmt"{env.name}.zkey",
inputs: env.dir / fmt"input.json",
)
runArkCircom(args, files, benchmarkLoops)
proc runAllBenchmarks*() =
echo "Running benchmark"
# setup()
var env = CircuitEnv.default()
env.check()
var args = CircuitArgs(
depth: 32, # maximum depth of the slot tree
maxslots: 256, # maximum number of slots
cellsize: 2048, # cell size in bytes
blocksize: 65536, # block size in bytes
nsamples: 1, # number of samples to prove
entropy: 1234567, # external randomness
seed: 12345, # seed for creating fake data
nslots: 11, # number of slots in the dataset
index: 3, # which slot we prove (0..NSLOTS-1)
ncells: 512, # number of cells in this slot
)
let
numberSamples = 3
benchmarkLoops = 5
for i in 1 .. numberSamples:
args.nsamples = i
stdout.styledWriteLine(fgYellow, "\nbenchmarking args: ", $args)
runBenchmark(args, env, benchmarkLoops)
printBenchMarkSummaries()
when isMainModule:
runAllBenchmarks()


@ -1,76 +0,0 @@
import std/tables
template withDir*(dir: string, blk: untyped) =
## set working dir for duration of blk
let prev = getCurrentDir()
try:
setCurrentDir(dir)
`blk`
finally:
setCurrentDir(prev)
template runit*(cmd: string) =
## run a shell command and verify it exits without an error code
echo "RUNNING: ", cmd
let cmdRes = execShellCmd(cmd)
echo "STATUS: ", cmdRes
assert cmdRes == 0
var benchRuns* = newTable[string, tuple[avgTimeSec: float, count: int]]()
func avg(vals: openArray[float]): float =
for v in vals:
result += v / vals.len().toFloat()
template benchmark*(name: untyped, count: int, blk: untyped) =
let benchmarkName: string = name
## simple benchmarking of a block of code
var runs = newSeqOfCap[float](count)
for i in 1 .. count:
block:
let t0 = epochTime()
`blk`
let elapsed = epochTime() - t0
runs.add elapsed
var elapsedStr = ""
for v in runs:
elapsedStr &= ", " & v.formatFloat(format = ffDecimal, precision = 3)
stdout.styledWriteLine(
fgGreen, "CPU Time [", benchmarkName, "] ", "avg(", $count, "): ", elapsedStr, " s"
)
benchRuns[benchmarkName] = (runs.avg(), count)
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
if printRegular:
echo ""
for k, v in benchRuns:
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
if printTsv:
echo ""
echo "name", "\t", "avgTimeSec", "\t", "count"
for k, v in benchRuns:
echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math
func floorLog2*(x: int): int =
var k = -1
var y = x
while (y > 0):
k += 1
y = y shr 1
return k
func ceilingLog2*(x: int): int =
if (x == 0):
return -1
else:
return (floorLog2(x - 1) + 1)
func checkPowerOfTwo*(x: int, what: string): int =
let k = ceilingLog2(x)
assert(x == 2 ^ k, ("`" & what & "` is expected to be a power of 2"))
return x


@ -1,15 +1,26 @@
mode = ScriptMode.Verbose mode = ScriptMode.Verbose
import std/os except commandLineParams import std/os except commandLineParams
import std/strutils
### Helper functions ### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = proc truthy(val: string): bool =
const truthySwitches = @["yes", "1", "on", "true"]
return val in truthySwitches
proc buildBinary(
srcName: string,
outName = os.lastPathPart(srcName),
srcDir = "./",
params = "",
lang = "c",
) =
if not dirExists "build": if not dirExists "build":
mkDir "build" mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params var extra_params = params
when compiles(commandLineParams): when defined(commandLineParams):
for param in commandLineParams(): for param in commandLineParams():
extra_params &= " " & param extra_params &= " " & param
else: else:
@ -18,58 +29,64 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
let let
# Place build output in 'build' folder, even if name includes a longer path. # Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd = cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim" srcName & ".nim"
exec(cmd) exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") = proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
buildBinary name, srcDir, params if not dirExists "build":
exec "build/" & name mkDir "build"
task codex, "build codex binary": if `type` == "dynamic":
buildBinary "codex", let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:chronicles_runtime_filtering " & "-d:chronicles_log_level=TRACE " & params &
" " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:chronicles_runtime_filtering " & "-d:chronicles_log_level=TRACE " & params &
" " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task storage, "build logos storage binary":
buildBinary "storage",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE" params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary": task testStorage, "Build & run Logos Storage tests":
buildBinary "tools/cirdl/cirdl" test "testStorage", outName = "testStorage"
task testCodex, "Build & run Codex tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true"
task testContracts, "Build & run Codex Contract tests":
test "testContracts"
task testIntegration, "Run integration tests": task testIntegration, "Run integration tests":
buildBinary "codex", buildBinary "storage",
params = outName = "storage",
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true" params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
test "testIntegration" test "testIntegration"
# use params to enable logging from the integration test executable # use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " & # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE" # "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary": task build, "build Logos Storage binary":
codexTask() storageTask()
task test, "Run tests": task test, "Run tests":
testCodexTask() testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)": task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask() testStorageTask()
testContractsTask()
testIntegrationTask() testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
codexTask()
test "testTaiko"
import strutils import strutils
import os import os
@ -93,15 +110,13 @@ task coverage, "generates code coverage report":
echo " *****************************************************************" echo " *****************************************************************"
var nimSrcs = " " var nimSrcs = " "
for f in walkDirRec("codex", {pcFile}): for f in walkDirRec("storage", {pcFile}):
if f.endswith(".nim"): if f.endswith(".nim"):
nimSrcs.add " " & f.absolutePath.quoteShell() nimSrcs.add " " & f.absolutePath.quoteShell()
echo "======== Running Tests ======== " echo "======== Running Tests ======== "
test "coverage", test "coverage",
srcDir = "tests/", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release"
params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c") exec("rm nimcache/coverage/*.c")
rmDir("coverage") rmDir("coverage")
mkDir("coverage") mkDir("coverage")
@ -114,10 +129,32 @@ task coverage, "generates code coverage report":
nimSrcs nimSrcs
) )
echo " ======== Generating HTML coverage report ======== " echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ") exec(
"genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report "
)
echo " ======== Coverage report Done ======== " echo " ======== Coverage report Done ======== "
task showCoverage, "open coverage html": task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== " echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "": if findExe("open") != "":
exec("open coverage/report/index.html") exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

ci/linux.Jenkinsfile Normal file

@ -0,0 +1,51 @@
#!/usr/bin/env groovy
library 'status-jenkins-lib@v1.9.37'
pipeline {
agent {
docker {
label 'linuxcontainer'
image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
args '--volume=/nix:/nix ' +
'--volume=/etc/nix:/etc/nix '
}
}
options {
timestamps()
ansiColor('xterm')
timeout(time: 20, unit: 'MINUTES')
disableConcurrentBuilds()
disableRestartFromStage()
/* manage how many builds we keep */
buildDiscarder(logRotator(
numToKeepStr: '20',
daysToKeepStr: '30',
))
}
stages {
stage('Build') {
steps {
script {
nix.flake("default")
}
}
}
stage('Check') {
steps {
script {
sh './result/bin/storage --version'
}
}
}
}
post {
cleanup {
cleanWs()
dir(env.WORKSPACE_TMP) { deleteDir() }
}
}
}


@ -1,11 +1,15 @@
#!/usr/bin/env groovy #!/usr/bin/env groovy
library 'status-jenkins-lib@v1.9.13' library 'status-jenkins-lib@v1.9.37'
pipeline { pipeline {
agent { label 'linux && x86_64 && nix-2.24' } agent { label 'macos && aarch64 && nix' }
options { options {
timestamps()
ansiColor('xterm')
timeout(time: 20, unit: 'MINUTES')
disableConcurrentBuilds() disableConcurrentBuilds()
disableRestartFromStage()
/* manage how many builds we keep */ /* manage how many builds we keep */
buildDiscarder(logRotator( buildDiscarder(logRotator(
numToKeepStr: '20', numToKeepStr: '20',
@ -25,13 +29,16 @@ pipeline {
stage('Check') { stage('Check') {
steps { steps {
script { script {
sh './result/bin/codex --version' sh './result/bin/storage --version'
} }
} }
} }
} }
post { post {
cleanup { cleanWs() } cleanup {
cleanWs()
dir(env.WORKSPACE_TMP) { deleteDir() }
}
} }
} }


@ -1,5 +0,0 @@
import ./blockexchange/[network, engine, peers]
import ./blockexchange/protobuf/[blockexc, presence]
export network, engine, blockexc, presence, peers


@ -1,6 +0,0 @@
import ./engine/discovery
import ./engine/advertiser
import ./engine/engine
import ./engine/payments
export discovery, advertiser, engine, payments


@ -1,726 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/sets
import std/options
import std/algorithm
import std/sugar
import pkg/chronos
import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics
import pkg/stint
import pkg/questionable
import ../../rng
import ../../stores/blockstore
import ../../blocktype
import ../../utils
import ../../utils/exceptions
import ../../utils/trackedfutures
import ../../merkletree
import ../../logutils
import ../../manifest
import ../protobuf/blockexc
import ../protobuf/presence
import ../network
import ../peers
import ./payments
import ./discovery
import ./advertiser
import ./pendingblocks
export peers, pendingblocks, payments, discovery
logScope:
topics = "codex blockexcengine"
declareCounter(
codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent"
)
declareCounter(
codex_block_exchange_want_have_lists_received,
"codex blockexchange wantHave lists received",
)
declareCounter(
codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent"
)
declareCounter(
codex_block_exchange_want_block_lists_received,
"codex blockexchange wantBlock lists received",
)
declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sent")
declareCounter(
codex_block_exchange_blocks_received, "codex blockexchange blocks received"
)
const
DefaultMaxPeersPerRequest* = 10
DefaultTaskQueueSize = 100
DefaultConcurrentTasks = 10
type
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
BlockExcEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
network*: BlockExcNetwork # Network interface
peers*: PeerCtxStore # Peers we're currently actively exchanging with
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
# Peers we're currently processing tasks for
concurrentTasks: int # Number of concurrent peers we're serving at any given time
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
discovery*: DiscoveryEngine
advertiser*: Advertiser
Pricing* = object
address*: EthAddress
price*: UInt256
# attach task scheduler to engine
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
if self.taskQueue.pushOrUpdateNoWait(task).isOk():
trace "Task scheduled for peer", peer = task.id
else:
warn "Unable to schedule task for peer", peer = task.id
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task
##
await self.discovery.start()
await self.advertiser.start()
trace "Blockexc starting with concurrent tasks", tasks = self.concurrentTasks
if self.blockexcRunning:
warn "Starting blockexc twice"
return
self.blockexcRunning = true
for i in 0 ..< self.concurrentTasks:
let fut = self.blockexcTaskRunner()
self.trackedFutures.track(fut)
proc stop*(self: BlockExcEngine) {.async: (raises: []).} =
## Stop the blockexc task
##
await self.trackedFutures.cancelTracked()
await self.network.stop()
await self.discovery.stop()
await self.advertiser.stop()
trace "NetworkStore stop"
if not self.blockexcRunning:
warn "Stopping blockexc without starting it"
return
self.blockexcRunning = false
trace "NetworkStore stopped"
proc sendWantHave(
self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async: (raises: [CancelledError]).} =
for p in peers:
let toAsk = addresses.filterIt(it notin p.peerHave)
trace "Sending wantHave request", toAsk, peer = p.id
await self.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave)
codex_block_exchange_want_have_lists_sent.inc()
proc sendWantBlock(
self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
trace "Sending wantBlock request to", addresses, peer = blockPeer.id
await self.network.request.sendWantList(
blockPeer.id, addresses, wantType = WantType.WantBlock
) # we want this remote to send us a block
codex_block_exchange_want_block_lists_sent.inc()
proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
Rng.instance.sample(peers)
proc downloadInternal(
self: BlockExcEngine, address: BlockAddress
) {.async: (raises: []).} =
logScope:
address = address
let handle = self.pendingBlocks.getWantHandle(address)
trace "Downloading block"
try:
while address in self.pendingBlocks:
logScope:
retries = self.pendingBlocks.retries(address)
interval = self.pendingBlocks.retryInterval
if self.pendingBlocks.retriesExhausted(address):
trace "Error retries exhausted"
handle.fail(newException(RetriesExhaustedError, "Error retries exhausted"))
break
trace "Running retry handle"
let peers = self.peers.getPeersForBlock(address)
logScope:
peersWith = peers.with.len
peersWithout = peers.without.len
trace "Peers for block"
if peers.with.len > 0:
self.pendingBlocks.setInFlight(address, true)
await self.sendWantBlock(@[address], peers.with.randomPeer)
else:
self.pendingBlocks.setInFlight(address, false)
if peers.without.len > 0:
await self.sendWantHave(@[address], peers.without)
self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
await (handle or sleepAsync(self.pendingBlocks.retryInterval))
self.pendingBlocks.decRetries(address)
if handle.finished:
trace "Handle for block finished", failed = handle.failed
break
except CancelledError as exc:
trace "Block download cancelled"
if not handle.finished:
await handle.cancelAndWait()
except RetriesExhaustedError as exc:
warn "Retries exhausted for block", address, exc = exc.msg
if not handle.finished:
handle.fail(exc)
finally:
self.pendingBlocks.setInFlight(address, false)
proc requestBlock*(
self: BlockExcEngine, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} =
if address notin self.pendingBlocks:
self.trackedFutures.track(self.downloadInternal(address))
try:
let handle = self.pendingBlocks.getWantHandle(address)
success await handle
except CancelledError as err:
warn "Block request cancelled", address
raise err
except CatchableError as err:
error "Block request failed", address, err = err.msg
failure err
proc requestBlock*(
self: BlockExcEngine, cid: Cid
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
self.requestBlock(BlockAddress.init(cid))
proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
let
peerCtx = self.peers.get(peer)
ourWantList = toSeq(self.pendingBlocks.wantList)
if peerCtx.isNil:
return
for blk in blocks:
if presence =? Presence.init(blk):
peerCtx.setPresence(presence)
let
peerHave = peerCtx.peerHave
dontWantCids = peerHave.filterIt(it notin ourWantList)
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)
let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it)
)
for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg
proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to
for p in self.peers:
for c in cids: # for each cid
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWantsCids:
try:
if await (c in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p)
break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"
proc cancelBlocks(
self: BlockExcEngine, addrs: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
## Tells neighboring peers that we're no longer interested in a block.
##
if self.peers.len == 0:
return
trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
)
return peerCtx
try:
let (succeededFuts, failedFuts) = await allFinishedFailed[BlockExcPeerCtx](
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
)
)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs)
if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
else:
trace "Block request cancellations sent to peers", peers = self.peers.len
except CancelledError as exc:
warn "Error sending block request cancellations", error = exc.msg
raise exc
except CatchableError as exc:
warn "Error sending block request cancellations", error = exc.msg
proc resolveBlocks*(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
self.pendingBlocks.resolve(blocksDelivery)
await self.scheduleTasks(blocksDelivery)
await self.cancelBlocks(blocksDelivery.mapIt(it.address))
proc resolveBlocks*(
self: BlockExcEngine, blocks: seq[Block]
) {.async: (raises: [CancelledError]).} =
await self.resolveBlocks(
blocks.mapIt(
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))
)
)
proc payForBlocks(
self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let
sendPayment = self.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address))
if payment =? self.wallet.pay(peer, price):
trace "Sending payment for blocks", price, len = blocksDelivery.len
await sendPayment(peer.id, payment)
proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
if bd.address notin self.pendingBlocks:
return failure("Received block is not currently a pending block")
if bd.address.leaf:
without proof =? bd.proof:
return failure("Missing proof")
if proof.index != bd.address.index:
return failure(
"Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index
)
without leaf =? bd.blk.cid.mhash.mapFailure, err:
return failure("Unable to get mhash from cid for block, nested err: " & err.msg)
without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
return
failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
if err =? proof.verify(leaf, treeRoot).errorOption:
return failure("Unable to verify proof for block, nested err: " & err.msg)
else: # not leaf
if bd.address.cid != bd.blk.cid:
return failure(
"Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid
)
return success()
proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery]
for bd in blocksDelivery:
logScope:
peer = peer
address = bd.address
try:
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
warn "Proof expected for a leaf block delivery"
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue
validatedBlocksDelivery.add(bd)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = self.peers.get(peer)
if peerCtx != nil:
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
return
if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
warn "Error resolving blocks", err = err.msg
return
proc wantListHandler*(
self: BlockExcEngine, peer: PeerId, wantList: WantList
) {.async: (raises: []).} =
trace "Received want list from peer", peer, wantList = wantList.entries.len
let peerCtx = self.peers.get(peer)
if peerCtx.isNil:
return
var
presence: seq[BlockPresence]
schedulePeer = false
try:
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants
let
have =
try:
await e.address in self.localStore
except CatchableError as exc:
# TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
false
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel:
trace "Received cancelation for untracked block, skipping",
address = e.address
continue
trace "Processing want list entry", wantList = $e
case e.wantType
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
)
)
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence)
if schedulePeer:
self.scheduleTask(peerCtx)
except CancelledError as exc:
warn "Error processing want list", error = exc.msg
proc accountHandler*(
self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
let context = self.peers.get(peer)
if context.isNil:
return
context.account = account.some
proc paymentHandler*(
self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async: (raises: []).} =
trace "Handling payments", peer
without context =? self.peers.get(peer).option and account =? context.account:
trace "No context or account for peer", peer
return
if channel =? context.paymentChannel:
let sender = account.address
discard self.wallet.acceptPayment(channel, Asset, sender, payment)
else:
context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want
## list exchange
##
trace "Setting up peer", peer
if peer notin self.peers:
trace "Setting up new peer", peer
self.peers.add(BlockExcPeerCtx(id: peer))
trace "Added peer", peers = self.peers.len
# broadcast our want list, the other peer will do the same
if self.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(self.pendingBlocks.wantList)
await self.network.request.sendWantList(peer, cids, full = true)
if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer
##
trace "Dropping peer", peer
# drop the peer from the peers table
self.peers.remove(peer)
proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send the peer the blocks it wants,
# if they are present in our local store
# TODO: There should be all sorts of accounting of
# bytes sent/received here
var wantsBlocks =
task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
for peerWant in task.peerWants.mitems:
if peerWant.address in addresses:
peerWant.inFlight = inFlight
if wantsBlocks.len > 0:
# Mark wants as in-flight.
let wantAddresses = wantsBlocks.mapIt(it.address)
updateInFlight(wantAddresses, true)
wantsBlocks.sort(SortOrder.Descending)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
if e.address.leaf:
(await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
)
)
else:
(await self.localStore.getBlock(e.address)).map(
(blk: Block) =>
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"
# All the wants that failed local lookup must be set to not-in-flight again.
let
successAddresses = blocksDelivery.mapIt(it.address)
failedAddresses = wantAddresses.filterIt(it notin successAddresses)
updateInFlight(failedAddresses, false)
if blocksDelivery.len > 0:
trace "Sending blocks to peer",
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
task.peerWants.keepItIf(it.address notin successAddresses)
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
## process tasks
##
trace "Starting blockexc task runner"
try:
while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx)
except CatchableError as exc:
error "error running block exchange task", error = exc.msg
info "Exiting blockexc task runner"
proc new*(
T: type BlockExcEngine,
localStore: BlockStore,
wallet: WalletRef,
network: BlockExcNetwork,
discovery: DiscoveryEngine,
advertiser: Advertiser,
peerStore: PeerCtxStore,
pendingBlocks: PendingBlocksManager,
concurrentTasks = DefaultConcurrentTasks,
): BlockExcEngine =
## Create new block exchange engine instance
##
let self = BlockExcEngine(
localStore: localStore,
peers: peerStore,
pendingBlocks: pendingBlocks,
network: network,
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures(),
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiser: advertiser,
)
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId)
else:
self.dropPeer(peerId)
if not isNil(network.switch):
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
self.wantListHandler(peer, wantList)
proc blockPresenceHandler(
peer: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raises: []).} =
self.blockPresenceHandler(peer, presence)
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.async: (raises: []).} =
self.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(
peer: PeerId, account: Account
): Future[void] {.async: (raises: []).} =
self.accountHandler(peer, account)
proc paymentHandler(
peer: PeerId, payment: SignedState
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)
network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler,
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler,
)
return self

@@ -1,46 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results
import ../peers
export nitro
export results
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
func openLedgerChannel*(
wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)
func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
if channel =? peer.paymentChannel:
success channel
elif account =? peer.account:
let channel = ?wallet.openLedgerChannel(account.address, Asset)
peer.paymentChannel = channel.some
success channel
else:
failure "no account set for peer"
func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
if account =? peer.account:
let asset = Asset
let receiver = account.address
let channel = ?wallet.getOrOpenChannel(peer)
wallet.pay(channel, asset, receiver, amount)
else:
failure "no account set for peer"

@@ -1,183 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/tables
import std/monotimes
import std/strutils
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
logScope:
topics = "codex pendingblocks"
declareGauge(
codex_block_exchange_pending_block_requests,
"codex blockexchange pending block requests",
)
declareGauge(
codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us"
)
const
DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis
type
RetriesExhaustedError* = object of CatchableError
BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError])
BlockReq* = object
handle*: BlockHandle
inFlight*: bool
blockRetries*: int
startTime*: int64
PendingBlocksManager* = ref object of RootObj
blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
self: PendingBlocksManager, address: BlockAddress, inFlight = false
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block
##
self.blocks.withValue(address, blk):
return blk[].handle
do:
let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
blockRetries: self.blockRetries,
startTime: getMonoTime().ticks,
)
self.blocks[address] = blk
let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} =
self.blocks.del(address)
self.updatePendingBlockGauge()
handle.addCallback(cleanUpBlock)
handle.cancelCallback = proc(data: pointer) {.raises: [].} =
if not handle.finished:
handle.removeCallback(cleanUpBlock)
cleanUpBlock(nil)
self.updatePendingBlockGauge()
return handle
proc getWantHandle*(
self: PendingBlocksManager, cid: Cid, inFlight = false
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight)
proc resolve*(
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
## Resolve pending blocks
##
for bd in blocksDelivery:
self.blocks.withValue(bd.address, blockReq):
if not blockReq[].handle.finished:
trace "Resolving pending block", address = bd.address
let
startTime = blockReq[].startTime
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000
blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else:
trace "Block handle already finished", address = bd.address
func retries*(self: PendingBlocksManager, address: BlockAddress): int =
self.blocks.withValue(address, pending):
result = pending[].blockRetries
do:
result = 0
func decRetries*(self: PendingBlocksManager, address: BlockAddress) =
self.blocks.withValue(address, pending):
pending[].blockRetries -= 1
func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool =
self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
## Set inflight status for a block
##
self.blocks.withValue(address, pending):
pending[].inFlight = inFlight
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##
self.blocks.withValue(address, pending):
result = pending[].inFlight
func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks
func contains*(self: PendingBlocksManager, address: BlockAddress): bool =
address in self.blocks
iterator wantList*(self: PendingBlocksManager): BlockAddress =
for a in self.blocks.keys:
yield a
iterator wantListBlockCids*(self: PendingBlocksManager): Cid =
for a in self.blocks.keys:
if not a.leaf:
yield a.cid
iterator wantListCids*(self: PendingBlocksManager): Cid =
var yieldedCids = initHashSet[Cid]()
for a in self.blocks.keys:
let cid = a.cidOrTreeCid
if cid notin yieldedCids:
yieldedCids.incl(cid)
yield cid
iterator wantHandles*(self: PendingBlocksManager): Future[Block] =
for v in self.blocks.values:
yield v.handle
proc wantListLen*(self: PendingBlocksManager): int =
self.blocks.len
func len*(self: PendingBlocksManager): int =
self.blocks.len
func new*(
T: type PendingBlocksManager,
retries = DefaultBlockRetries,
interval = DefaultRetryInterval,
): PendingBlocksManager =
PendingBlocksManager(blockRetries: retries, retryInterval: interval)
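A short usage sketch of the manager above, assuming `chronos` plus a `Cid` and a matching `BlockDelivery` obtained elsewhere:

```nim
# Sketch: one task awaits a block while a delivery resolves it.
proc example(pending: PendingBlocksManager, cid: Cid,
    delivery: BlockDelivery) {.async.} =
  let handle = pending.getWantHandle(cid) # registers the pending request
  pending.resolve(@[delivery])            # completes the handle on address match
  let blk = await handle                  # the delivered block
  discard blk
```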

@@ -1,4 +0,0 @@
import ./network/network
import ./network/networkpeer
export network, networkpeer

@@ -1,107 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import pkg/chronos
import pkg/libp2p
import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures
logScope:
topics = "codex blockexcnetworkpeer"
const DefaultYieldInterval = 50.millis
type
ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}
NetworkPeer* = ref object of RootObj
id*: PeerId
handler*: RPCHandler
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn):
trace "No connection to read from", peer = self.id
return
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + self.yieldInterval
    while not conn.atEof and not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)
let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = self.id, connId = conn.oid
await conn.close()
proc connect*(
self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn
self.sendConn = await self.getConn()
self.trackedFutures.track(self.readLoop(self.sendConn))
return self.sendConn
proc send*(
self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()
if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = self.id
return
trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg))
func new*(
T: type NetworkPeer,
peer: PeerId,
connProvider: ConnProvider,
rpcHandler: RPCHandler,
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)
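A construction sketch, assuming a libp2p `Switch` is in scope; the protocol id string is illustrative, since the real codec is owned by the network layer:

```nim
# Sketch: wire a NetworkPeer to a dialer and an RPC handler.
proc setup(switch: Switch, peerId: PeerId): Future[NetworkPeer] {.async.} =
  proc rpcHandler(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} =
    discard # a real handler dispatches want lists, deliveries, presence, ...

  proc connProvider(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).} =
    try:
      return await switch.dial(peerId, "/codex/blockexc/1.0.0") # id illustrative
    except CancelledError as err:
      raise err
    except CatchableError as err:
      raiseAssert "sketch only, dial failed: " & err.msg

  let peer = NetworkPeer.new(peerId, connProvider, rpcHandler)
  discard await peer.connect() # dials and attaches the read loop
  return peer
```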

@@ -1,4 +0,0 @@
import ./peers/peerctxstore
import ./peers/peercontext
export peerctxstore, peercontext

@@ -1,65 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/tables
import std/sets
import pkg/libp2p
import pkg/chronos
import pkg/nitro
import pkg/questionable
import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence
import ../../blocktype
import ../../logutils
export payments, nitro
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
  blocks*: Table[BlockAddress, Presence] # remote peer's have-list, including prices
  peerWants*: seq[WantListEntry] # remote peer's want list
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
for a in addresses:
self.blocks.del(a)
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address])
func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
var price = 0.u256
for a in addresses:
    self.blocks.withValue(a, presence):
      price += presence[].price
price

@@ -1,87 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sequtils
import std/tables
import std/algorithm
import std/sequtils
import pkg/chronos
import pkg/libp2p
import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
import ./peercontext
export peercontext
logScope:
topics = "codex peerctxstore"
type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]]
iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
for p in self.peers.values:
yield p
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
  ## Convenience method to check for peer presence
##
a.anyIt(it.id == b)
func peerIds*(self: PeerCtxStore): seq[PeerId] =
toSeq(self.peers.keys)
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
peerId in self.peers
func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
self.peers[peer.id] = peer
func remove*(self: PeerCtxStore, peerId: PeerId) =
self.peers.del(peerId)
func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
self.peers.getOrDefault(peerId, nil)
func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if peer.peerHave.anyIt(it == address):
res.with.add(peer)
else:
res.without.add(peer)
res
proc new*(T: type PeerCtxStore): PeerCtxStore =
## create new instance of a peer context store
PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]())
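A small usage sketch, assuming an existing `peerId` and block `address`:

```nim
# Sketch: split known peers by whether they advertise a given block.
let store = PeerCtxStore.new()
store.add(BlockExcPeerCtx(id: peerId))
let (withBlock, withoutBlock) = store.getPeersForBlock(address)
# Peers in `withBlock` can be asked for the block directly; the rest
# may still receive a want-have query.
```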

@@ -1,52 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/hashes
import std/sequtils
import pkg/stew/endians2
import message
import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash =
hash(e.address)
proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
  ## Convenience method to check for the presence of a block address
##
a.anyIt(it.address == b)
proc `==`*(a: WantListEntry, b: BlockAddress): bool =
return a.address == b
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b
proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
  ## Convenience method to check for the presence of a block address
##
a.anyIt(it.address == b)
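The overloads above make want lists queryable by `BlockAddress` and sortable by priority; a sketch, assuming an address `addr1` is in scope:

```nim
import std/algorithm

# Sketch: address lookups and priority ordering on a want list.
var wants = @[
  WantListEntry(address: addr1, priority: 1),
  WantListEntry(address: addr1, priority: 3),
]
assert addr1 in wants            # `contains` via the `==` overload above
wants.sort(SortOrder.Descending) # `<` compares priorities
assert wants[0].priority == 3
```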

@@ -1,273 +0,0 @@
# Protocol of data exchange between Codex nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
import std/sugar
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/cid
import pkg/questionable
import ../../units
import ../../merkletree
import ../../blocktype
const
MaxBlockSize* = 100.MiBs.uint
MaxMessageSize* = 100.MiBs.uint
type
WantType* = enum
WantBlock = 0
WantHave = 1
WantListEntry* = object
address*: BlockAddress
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
full*: bool # Whether this is the full wantList. default to false
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?CodexProof # Present only if `address.leaf` is true
BlockPresenceType* = enum
Have = 0
DontHave = 1
BlockPresence* = object
address*: BlockAddress
`type`*: BlockPresenceType
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
AccountMessage* = object
address*: seq[byte] # Ethereum address to which payments should be made
StateChannelUpdate* = object
update*: seq[byte] # Signed Nitro state, serialized as JSON
Message* = object
wantList*: WantList
payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence]
pendingBytes*: uint
account*: AccountMessage
payment*: StateChannelUpdate
#
# Encoding Message into seq[byte] in Protobuf format
#
proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
var ipb = initProtoBuffer()
ipb.write(1, value.leaf.uint)
if value.leaf:
ipb.write(2, value.treeCid.data.buffer)
ipb.write(3, value.index.uint64)
else:
ipb.write(4, value.cid.data.buffer)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.priority.uint64)
ipb.write(3, value.cancel.uint)
ipb.write(4, value.wantType.uint)
ipb.write(5, value.sendDontHave.uint)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
var ipb = initProtoBuffer()
for v in value.entries:
ipb.write(1, v)
ipb.write(2, value.full.uint)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
if value.address.leaf:
if proof =? value.proof:
ipb.write(4, proof.encode())
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.`type`.uint)
ipb.write(3, value.price)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: AccountMessage) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
var ipb = initProtoBuffer()
ipb.write(1, value.update)
ipb.finish()
pb.write(field, ipb)
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
for v in value.blockPresences:
ipb.write(4, v)
ipb.write(5, value.pendingBytes)
ipb.write(6, value.account)
ipb.write(7, value.payment)
ipb.finish()
ipb.buffer
#
# Decoding Message from seq[byte] in Protobuf format
#
proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
var
value: BlockAddress
leaf: bool
field: uint64
cidBuf = newSeq[byte]()
if ?pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
index: Natural
if ?pb.getField(2, cidBuf):
treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, field):
index = field
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
else:
var cid: Cid
if ?pb.getField(4, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
value = BlockAddress(leaf: false, cid: cid)
ok(value)
proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] =
var
value = WantListEntry()
field: uint64
ipb: ProtoBuffer
if ?pb.getField(1, ipb):
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.priority = int32(field)
if ?pb.getField(3, field):
value.cancel = bool(field)
if ?pb.getField(4, field):
value.wantType = WantType(field)
if ?pb.getField(5, field):
value.sendDontHave = bool(field)
ok(value)
proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
var
value = WantList()
field: uint64
sublist: seq[seq[byte]]
if ?pb.getRepeatedField(1, sublist):
for item in sublist:
value.entries.add(?WantListEntry.decode(initProtoBuffer(item)))
if ?pb.getField(2, field):
value.full = bool(field)
ok(value)
proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
var
value = BlockDelivery()
dataBuf = newSeq[byte]()
cidBuf = newSeq[byte]()
cid: Cid
ipb: ProtoBuffer
if ?pb.getField(1, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, dataBuf):
value.blk =
?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, ipb):
value.address = ?BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ?pb.getField(4, proofBuf):
let proof = ?CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = CodexProof.none
else:
value.proof = CodexProof.none
ok(value)
proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
var
value = BlockPresence()
field: uint64
ipb: ProtoBuffer
if ?pb.getField(1, ipb):
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.`type` = BlockPresenceType(field)
discard ?pb.getField(3, value.price)
ok(value)
proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
var value = AccountMessage()
discard ?pb.getField(1, value.address)
ok(value)
proc decode*(
_: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
var value = StateChannelUpdate()
discard ?pb.getField(1, value.update)
ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
discard ?pb.getField(5, value.pendingBytes)
if ?pb.getField(6, ipb):
value.account = ?AccountMessage.decode(ipb)
if ?pb.getField(7, ipb):
value.payment = ?StateChannelUpdate.decode(ipb)
ok(value)
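A round-trip sketch for the codec above, assuming a `BlockAddress` value `address`:

```nim
# Sketch: encode a Message and decode it back.
let msg = Message(
  wantList: WantList(entries: @[WantListEntry(address: address, priority: 1)])
)
let bytes = protobufEncode(msg)
let decoded = Message.protobufDecode(bytes).tryGet()
assert decoded.wantList.entries.len == 1
```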

@@ -1,58 +0,0 @@
// Protocol of data exchange between Codex nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";
package blockexc.message.pb;
message Message {
message Wantlist {
enum WantType {
wantBlock = 0;
wantHave = 1;
}
message Entry {
bytes block = 1; // the block cid
int32 priority = 2; // the priority (normalized). default to 1
bool cancel = 3; // whether this revokes an entry
WantType wantType = 4; // Note: defaults to enum 0, ie Block
bool sendDontHave = 5; // Note: defaults to false
}
repeated Entry entries = 1; // a list of wantlist entries
bool full = 2; // whether this is the full wantlist. default to false
}
message Block {
bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length)
bytes data = 2;
}
enum BlockPresenceType {
presenceHave = 0;
presenceDontHave = 1;
}
message BlockPresence {
bytes cid = 1;
BlockPresenceType type = 2;
bytes price = 3; // Amount of assets to pay for the block (UInt256)
}
message AccountMessage {
bytes address = 1; // Ethereum address to which payments should be made
}
message StateChannelUpdate {
bytes update = 1; // Signed Nitro state, serialized as JSON
}
Wantlist wantlist = 1;
repeated Block payload = 3;
repeated BlockPresence blockPresences = 4;
int32 pendingBytes = 5;
AccountMessage account = 6;
StateChannelUpdate payment = 7;
}

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014-2018 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -1,38 +0,0 @@
{.push raises: [].}
import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import ./blockexc
export AccountMessage
export StateChannelUpdate
export stint
export nitro
type Account* = object
address*: EthAddress
func init*(_: type AccountMessage, account: Account): AccountMessage =
AccountMessage(address: @(account.address.toArray))
func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress =
var address: array[20, byte]
if bytes.len != address.len:
return EthAddress.none
for i in 0 ..< address.len:
address[i] = bytes[i]
EthAddress(address).some
func init*(_: type Account, message: AccountMessage): ?Account =
without address =? EthAddress.parse(message.address):
return none Account
some Account(address: address)
func init*(_: type StateChannelUpdate, state: SignedState): StateChannelUpdate =
StateChannelUpdate(update: state.toJson.toBytes)
proc init*(_: type SignedState, update: StateChannelUpdate): ?SignedState =
SignedState.fromJson(string.fromBytes(update.update))
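A quick sanity sketch for the conversions above; the zero address is used purely for illustration:

```nim
# Sketch: Account -> AccountMessage -> Account survives the round trip.
let account = Account(address: EthAddress.zero)
let message = AccountMessage.init(account)
assert Account.init(message).isSome # the 20-byte address parses back
```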

@@ -1,42 +0,0 @@
{.push raises: [].}
import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import ./blockexc
import ../../blocktype
export questionable
export stint
export BlockPresenceType
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
address*: BlockAddress
have*: bool
price*: UInt256
func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
if bytes.len > 32:
return UInt256.none
UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence =
without price =? UInt256.parse(message.price):
return none Presence
some Presence(
address: message.address,
have: message.`type` == BlockPresenceType.Have,
price: price,
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
address: presence.address,
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE),
)
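A round-trip sketch for the presence conversions above, assuming a block `address` and the module's imports:

```nim
# Sketch: Presence <-> PresenceMessage round trip.
let presence = Presence(address: address, have: true, price: 42.u256)
let message = PresenceMessage.init(presence)
without decoded =? Presence.init(message):
  raiseAssert "price bytes should parse back into a UInt256"
assert decoded.have and decoded.price == 42.u256
```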

@@ -1,336 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/strutils
import std/os
import std/tables
import std/cpuinfo
import pkg/chronos
import pkg/taskpools
import pkg/presto
import pkg/libp2p
import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
import ./node
import ./conf
import ./rng as random
import ./rest/api
import ./stores
import ./slots
import ./blockexchange
import ./utils/fileutils
import ./erasure
import ./discovery
import ./contracts
import ./systemclock
import ./contracts/clock
import ./contracts/deployment
import ./utils/addrutils
import ./namespaces
import ./codextypes
import ./logutils
import ./nat
logScope:
topics = "codex node"
type
CodexServer* = ref object
config: CodexConf
restServer: RestServerRef
codexNode: CodexNodeRef
repoStore: RepoStore
maintenance: BlockMaintainer
CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1
trace "Checking sync state of Ethereum provider..."
while await provider.isSyncing:
notice "Waiting for Ethereum provider to sync..."
await sleepAsync(sleepTime.seconds)
if sleepTime < 10:
inc sleepTime
trace "Ethereum provider is synced."
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
## bootstrap interactions and return contracts
## using clients, hosts, validators pairings
##
let
config = s.config
repo = s.repoStore
if config.persistence:
if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure
let provider = JsonRpcProvider.new(
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
signer = provider.getSigner(account)
elif keyFile =? config.ethPrivateKey:
without isSecure =? checkSecureFile(keyFile):
error "Could not check file permissions: does Ethereum private key file exist?"
quit QuitFailure
if not isSecure:
error "Ethereum private key file does not have safe file permissions"
quit QuitFailure
without key =? keyFile.readAllChars():
error "Unable to read Ethereum private key file"
quit QuitFailure
without wallet =? EthWallet.new(key.strip(), provider):
error "Invalid Ethereum private key in file"
quit QuitFailure
signer = wallet
let deploy = Deployment.new(provider, config.marketplaceAddress)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure
let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(
marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
)
let clock = OnChainClock.new(provider)
var client: ?ClientInteractions
var host: ?HostInteractions
var validator: ?ValidatorInteractions
if config.validator or config.persistence:
s.codexNode.clock = clock
else:
s.codexNode.clock = SystemClock()
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
else:
let proofFailures = 0
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure
let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
host = some HostInteractions.new(clock, sales)
if config.validator:
without validationConfig =?
ValidationConfig.init(
config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
), err:
error "Invalid validation parameters", err = err.msg
quit QuitFailure
let validation = Validation.new(clock, market, validationConfig)
validator = some ValidatorInteractions.new(clock, validation)
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config
await s.repoStore.start()
s.maintenance.start()
await s.codexNode.switch.start()
let (announceAddrs, discoveryAddrs) = nattedAddress(
s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort
)
s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
await s.bootstrapInteractions()
await s.codexNode.start()
s.restServer.start()
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"
let res = await noCancel allFinishedFailed[void](
@[
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
)
if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"
proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer =
## create CodexServer including setting up datastore, repostore, etc
let switch = SwitchBuilder
.new()
.withPrivateKey(privateKey)
.withAddresses(config.listenAddrs)
.withRng(random.Rng.instance())
.withNoise()
.withMplex(5.minutes, 5.minutes)
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr})
.build()
var
cache: CacheStore = nil
taskpool: Taskpool
try:
if config.numThreads == ThreadCount(0):
taskpool = Taskpool.new(numThreads = min(countProcessors(), 16))
else:
taskpool = Taskpool.new(numThreads = int(config.numThreads))
info "Threadpool started", numThreads = taskpool.numThreads
except CatchableError as exc:
raiseAssert("Failure in taskpool initialization:" & exc.msg)
if config.cacheSize > 0'nb:
cache = CacheStore.new(cacheSize = config.cacheSize)
## Is unused?
let discoveryDir = config.dataDir / CodexDhtNamespace
if io2.createPath(discoveryDir).isErr:
trace "Unable to create discovery directory for block store",
discoveryDir = discoveryDir
raise (ref Defect)(
msg: "Unable to create discovery directory for block store: " & discoveryDir
)
let
discoveryStore = Datastore(
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect(
"Should create discovery datastore!"
)
)
discovery = Discovery.new(
switch.peerInfo.privateKey,
announceAddrs = config.listenAddrs,
bindPort = config.discoveryPort,
bootstrapNodes = config.bootstrapNodes,
store = discoveryStore,
)
wallet = WalletRef.new(EthPrivateKey.random())
network = BlockExcNetwork.new(switch)
repoData =
case config.repoKind
of repoFS:
Datastore(
FSDatastore.new($config.dataDir, depth = 5).expect(
"Should create repo file data store!"
)
)
of repoSQLite:
Datastore(
SQLiteDatastore.new($config.dataDir).expect(
"Should create repo SQLite data store!"
)
)
of repoLevelDb:
Datastore(
LevelDbDatastore.new($config.dataDir).expect(
"Should create repo LevelDB data store!"
)
)
repoStore = RepoStore.new(
repoDs = repoData,
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect(
"Should create metadata store!"
),
quotaMaxBytes = config.storageQuota,
blockTtl = config.blockTtl,
)
maintenance = BlockMaintainer.new(
repoStore,
interval = config.blockMaintenanceInterval,
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(
repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
)
store = NetworkStore.new(engine, repoStore)
prover =
if config.prover:
let backend =
config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
else:
none Prover
codexNode = CodexNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
discovery = discovery,
prover = prover,
taskPool = taskpool,
)
restServer = RestServerRef
.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high,
)
.expect("Should create rest server!")
switch.mount(network)
CodexServer(
config: config,
codexNode: codexNode,
restServer: restServer,
repoStore: repoStore,
maintenance: maintenance,
)

@@ -1,11 +0,0 @@
import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
import contracts/provider
export requests
export marketplace
export market
export interactions
export provider

@@ -1,148 +0,0 @@
Codex Contracts in Nim
=======================
Nim API for the [Codex smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
Smart contract
--------------
Connecting to the smart contract on an Ethereum node:
```nim
import codex/contracts
import ethers
let address = # fill in address where the contract was deployed
let provider = JsonRpcProvider.new("ws://localhost:8545")
let marketplace = Marketplace.new(address, provider)
```
Setup client and host so that they can sign transactions; here we use the first
two accounts on the Ethereum node:
```nim
let accounts = await provider.listAccounts()
let client = provider.getSigner(accounts[0])
let host = provider.getSigner(accounts[1])
```
Storage requests
----------------
Creating a request for storage:
```nim
let request : StorageRequest = (
client: # address of the client requesting storage
duration: # duration of the contract in seconds
size: # size in bytes
contentHash: # SHA256 hash of the content that's going to be stored
proofProbability: # require a storage proof roughly once every N periods
maxPrice: # maximum price the client is willing to pay
expiry: # expiration time of the request (in unix time)
nonce: # random nonce to differentiate between similar requests
)
```
When a client wants to submit this request to the network, it needs to pay the
maximum price to the smart contract in advance. The difference between the
maximum price and the offered price will be reimbursed later.
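Under the hood, preparing the payment is an ERC-20 approval of the marketplace contract; a sketch, where the `token` accessor and `Erc20Token` wrapper are assumptions:

```nim
let token = Erc20Token.new(await marketplace.token(), client)
discard await token
  .approve(marketplace.address, request.maxPrice)
  .confirm(1)
```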
Once the payment has been prepared, the client can submit the request to the
network:
```nim
await storage
.connect(client)
.requestStorage(request)
```
Storage offers
--------------
Creating a storage offer:
```nim
let offer: StorageOffer = (
host: # address of the host that is offering storage
requestId: request.id,
price: # offered price (in number of tokens)
expiry: # expiration time of the offer (in unix time)
)
```
A host submits an offer:
```nim
await storage
.connect(host)
.offerStorage(offer)
```
Client selects an offer:
```nim
await storage
.connect(client)
.selectOffer(offer.id)
```
Starting and finishing a storage contract
-----------------------------------------
The host whose offer got selected can start the storage contract once it has
received the data that needs to be stored:
```nim
await storage
.connect(host)
.startContract(offer.id)
```
Once the storage contract is finished, the host can release payment:
```nim
await storage
.connect(host)
.finishContract(id)
```
Storage proofs
--------------
Time is divided into periods, and each period a storage proof may be required
from the host. The odds of requiring a storage proof are negotiated through the
storage request. For more details about the timing of storage proofs, please
refer to the [design document][2].
At the start of each period of time, the host can check whether a storage proof
is required:
```nim
let isProofRequired = await storage.isProofRequired(offer.id)
```
If a proof is required, the host can submit it before the end of the period:
```nim
await storage
.connect(host)
.submitProof(id, proof)
```
If a proof is not submitted, then a validator can mark a proof as missing:
```nim
await storage
.connect(validator)
.markProofAsMissing(id, period)
```
[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md

@@ -1,78 +0,0 @@
import std/times
import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures
export clock
logScope:
topics = "contracts clock"
type OnChainClock* = ref object of Clock
provider: Provider
subscription: Subscription
offset: times.Duration
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
let blockTime = initTime(blck.timestamp.truncate(int64), 0)
let computerTime = getTime()
clock.offset = blockTime - computerTime
clock.blockNumber = number
trace "updated clock",
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CatchableError as error:
debug "error updating clock: ", error = error.msg
method start*(clock: OnChainClock) {.async.} =
if clock.started:
return
proc onBlock(blckResult: ?!Block) =
if eventError =? blckResult.errorOption:
error "There was an error in block subscription", msg = eventError.msg
return
# ignore block parameter; hardhat may call this with pending blocks
clock.trackedFutures.track(clock.update())
await clock.update()
clock.subscription = await clock.provider.subscribe(onBlock)
clock.started = true
method stop*(clock: OnChainClock) {.async.} =
if not clock.started:
return
await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))
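A usage sketch for the clock above, assuming a connected `Provider`:

```nim
# Sketch: start the clock and wait out an on-chain deadline.
proc example(provider: Provider) {.async.} =
  let clock = OnChainClock.new(provider)
  await clock.start()                     # subscribes to new blocks
  await clock.waitUntil(clock.now() + 60) # ~60 seconds of chain time
  await clock.stop()
```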

@@ -1,104 +0,0 @@
import pkg/contractabi
import pkg/ethers/contracts/fields
import pkg/questionable/results
export contractabi
const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64
type
MarketplaceConfig* = object
collateral*: CollateralConfig
proofs*: ProofConfig
reservations*: SlotReservationsConfig
requestDurationLimit*: uint64
CollateralConfig* = object
repairRewardPercentage*: uint8
      # percentage of the remaining collateral a slot has after it has been freed
    maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
    slashPercentage*: uint8 # percentage of the collateral that is slashed
    validatorRewardPercentage*: uint8
      # percentage of the slashed amount going to the validators
  ProofConfig* = object
    period*: uint64 # proof requirements are calculated per period (in seconds)
    timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
    downtime*: uint8 # ignore this many recent blocks for proof requirements
    downtimeProduct*: uint8
      # Ensures the pointer does not remain in downtime for many consecutive
      # periods. For each period increase, move the pointer `downtimeProduct`
      # blocks. Should be a prime number to ensure there are no cycles.
    zkeyHash*: string # hash of the zkey file which is linked to the verifier
SlotReservationsConfig* = object
maxReservations*: uint8
func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
ProofConfig(
period: tupl[0],
timeout: tupl[1],
downtime: tupl[2],
downtimeProduct: tupl[3],
zkeyHash: tupl[4],
)
func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
SlotReservationsConfig(maxReservations: tupl[0])
func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
CollateralConfig(
repairRewardPercentage: tupl[0],
maxNumberOfSlashes: tupl[1],
slashPercentage: tupl[2],
validatorRewardPercentage: tupl[3],
)
func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
MarketplaceConfig(
collateral: tupl[0],
proofs: tupl[1],
reservations: tupl[2],
requestDurationLimit: tupl[3],
)
func solidityType*(_: type SlotReservationsConfig): string =
solidityType(SlotReservationsConfig.fieldTypes)
func solidityType*(_: type ProofConfig): string =
solidityType(ProofConfig.fieldTypes)
func solidityType*(_: type CollateralConfig): string =
solidityType(CollateralConfig.fieldTypes)
func solidityType*(_: type MarketplaceConfig): string =
solidityType(MarketplaceConfig.fieldTypes)
func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: CollateralConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
let tupl = ?decoder.read(ProofConfig.fieldTypes)
success ProofConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
success SlotReservationsConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
let tupl = ?decoder.read(CollateralConfig.fieldTypes)
success CollateralConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T =
let tupl = ?decoder.read(MarketplaceConfig.fieldTypes)
success MarketplaceConfig.fromTuple(tupl)

@@ -1,48 +0,0 @@
import std/os
import std/tables
import pkg/ethers
import pkg/questionable
import ../conf
import ../logutils
import ./marketplace
type Deployment* = ref object
provider: Provider
marketplaceAddressOverride: ?Address
const knownAddresses = {
# Hardhat localhost network
"31337":
{"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Jun 11 2025 17:04:56 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0xd53a4181862f42641ccA02Fb4CED7D7f19C6920B")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
let id = chainId.toString(10)
notice "Looking for well-known contract address with ChainID ", chainId = id
if not (id in knownAddresses):
return none Address
return knownAddresses[id].getOrDefault($T, Address.none)
proc new*(
_: type Deployment,
provider: Provider,
marketplaceAddressOverride: ?Address = none Address,
): Deployment =
Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
when contract is Marketplace:
if address =? deployment.marketplaceAddressOverride:
return some address
let chainId = await deployment.provider.getChainId()
return contract.getKnownAddress(chainId)
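A lookup sketch mirroring how the node resolves the marketplace address at startup:

```nim
# Sketch: resolve the Marketplace address for the connected chain.
proc resolveMarketplace(provider: Provider): Future[Address] {.async.} =
  let deployment = Deployment.new(provider) # no override: use known addresses
  without address =? await deployment.address(Marketplace):
    quit "no known Marketplace address for this network"
  return address
```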

@@ -1,9 +0,0 @@
import ./interactions/interactions
import ./interactions/hostinteractions
import ./interactions/clientinteractions
import ./interactions/validatorinteractions
export interactions
export hostinteractions
export clientinteractions
export validatorinteractions

@@ -1,26 +0,0 @@
import pkg/ethers
import ../../purchasing
import ../../logutils
import ../market
import ../clock
import ./interactions
export purchasing
export logutils
type ClientInteractions* = ref object of ContractInteractions
purchasing*: Purchasing
proc new*(
_: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
ClientInteractions(clock: clock, purchasing: purchasing)
proc start*(self: ClientInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.purchasing.start()
proc stop*(self: ClientInteractions) {.async.} =
await self.purchasing.stop()
await procCall ContractInteractions(self).stop()

@@ -1,24 +0,0 @@
import pkg/chronos
import ../../logutils
import ../../sales
import ./interactions
export sales
export logutils
type HostInteractions* = ref object of ContractInteractions
sales*: Sales
proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
## Create a new HostInteractions instance
##
HostInteractions(clock: clock, sales: sales)
method start*(self: HostInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.sales.start()
method stop*(self: HostInteractions) {.async.} =
await self.sales.stop()
  await procCall ContractInteractions(self).stop()

@@ -1,15 +0,0 @@
import pkg/ethers
import ../clock
import ../marketplace
import ../market
export clock
type ContractInteractions* = ref object of RootObj
clock*: Clock
method start*(self: ContractInteractions) {.async, base.} =
discard
method stop*(self: ContractInteractions) {.async, base.} =
discard

@@ -1,20 +0,0 @@
import ./interactions
import ../../validation
export validation
type ValidatorInteractions* = ref object of ContractInteractions
validation: Validation
proc new*(
_: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
ValidatorInteractions(clock: clock, validation: validation)
proc start*(self: ValidatorInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.validation.start()
proc stop*(self: ValidatorInteractions) {.async.} =
await self.validation.stop()
await procCall ContractInteractions(self).stop()

@@ -1,667 +0,0 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
import ../logutils
import ../market
import ./marketplace
import ./proofs
import ./provider
export market
logScope:
topics = "marketplace onchain market"
type
OnChainMarket* = ref object of Market
contract: Marketplace
signer: Signer
rewardRecipient: ?Address
configuration: ?MarketplaceConfig
requestCache: LruCache[string, StorageRequest]
allowanceLock: AsyncLock
MarketSubscription = market.Subscription
EventSubscription = ethers.Subscription
OnChainMarketSubscription = ref object of MarketSubscription
eventSubscription: EventSubscription
func new*(
_: type OnChainMarket,
contract: Marketplace,
rewardRecipient = Address.none,
requestCacheSize: uint16 = DefaultRequestCacheSize,
): OnChainMarket =
without signer =? contract.signer:
raiseAssert("Marketplace contract should have a signer")
var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))
OnChainMarket(
contract: contract,
signer: signer,
rewardRecipient: rewardRecipient,
requestCache: requestCache,
)
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail.prefixWith(msg))
proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration:
if err =? (await market.loadConfig()).errorOption:
raiseMarketError(err.msg)
without config =? market.configuration:
raiseMarketError("Failed to access to config from the Marketplace contract")
return config
return resolvedConfig
template withAllowanceLock*(market: OnChainMarket, body: untyped) =
if market.allowanceLock.isNil:
market.allowanceLock = newAsyncLock()
await market.allowanceLock.acquire()
try:
body
finally:
try:
market.allowanceLock.release()
except AsyncLockError as error:
raise newException(Defect, error.msg, error)
proc approveFunds(
market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
debug "Approving tokens", amount
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
let owner = await market.signer.getAddress()
let spender = market.contract.address
market.withAllowanceLock:
let allowance = await token.allowance(owner, spender)
discard await token.approve(spender, allowance + amount).confirm(1)
method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)
method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config()
return some config.proofs.zkeyHash
method getSigner*(
market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit
method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(
market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)
method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
try:
let key = $id
if key in market.requestCache:
return some market.requestCache[key]
let request = await market.contract.getRequest(id)
market.requestCache[key] = request
return some request
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError("Failed to get request state"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides)
except Marketplace_UnknownRequest:
return none RequestState
method slotState*(
market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id)
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id)
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
if address != Address.default:
return some address
else:
return none Address
method currentCollateral*(
market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError("Failed to get active slot"):
try:
return some await market.contract.getActiveSlot(slotId)
except Marketplace_SlotIsFree:
return none Slot
method fillSlot(
market: OnChainMarket,
requestId: RequestId,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fill slot"):
logScope:
requestId
slotIndex
try:
await market.approveFunds(collateral)
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
trace "calling fillSlot on contract"
discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
method freeSlot*(
market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to free slot"):
try:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient was specified, use it as the reward recipient, and
# use the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient
)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient, # SP's address
overrides,
)
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
freeSlot = market.contract.freeSlot(slotId, overrides)
discard await freeSlot.confirm(1)
except Marketplace_SlotIsFree as parent:
raise newException(
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
)
method withdrawFunds(
market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
except Marketplace_SlotIsFree:
return false
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
except Marketplace_SlotIsFree:
return false
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
try:
discard await market.contract.submitProof(id, proof).confirm(1)
except Proofs_InvalidProof as parent:
raise newException(
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
)
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"):
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
method canMarkProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async: (raises: [CancelledError]).} =
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
return true
except EthersError as e:
trace "Proof cannot be marked as missing", msg = e.msg
return false
method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"):
try:
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the last one that is allowed to reserve the slot
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
discard
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)
method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
callback(event.requestId, event.ask, event.expiry)
convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: uint64,
callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)
convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return
callback(event.id)
convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
await subscription.eventSubscription.unsubscribe()
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastSlotFilledEvents(fromBlock)
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError("Failed to get past StorageRequested events from block"):
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock)
method slotCollateral*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)
try:
let slotState = await market.slotState(slotid)
without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)
return market.slotCollateral(request.ask.collateralPerSlot, slotState)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error
method slotCollateral*(
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
without repairRewardPercentage =?
market.configuration .? collateral .? repairRewardPercentage:
return failure newException(
MarketError,
"Failure calculating the slotCollateral, cannot get the reward percentage",
)
return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
100.u256
)
)
return success(collateralPerSlot)
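# Worked example (illustrative, not part of the original module): per the
# formula above, a slot in the Repair state gets a collateral discount equal
# to the repair reward. Assuming stint's `u256` helpers:
when isMainModule:
  import pkg/stint
  let collateralPerSlot = 1000.u256
  let repairRewardPercentage = 10'u8
  # Collateral required to fill a slot that is in the Repair state:
  let discounted =
    collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(100.u256)
  assert discounted == 900.u256 # 1000 - (1000 * 10) div 100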


@@ -1,198 +0,0 @@
import pkg/ethers
import pkg/ethers/erc20
import pkg/json_rpc/rpcclient
import pkg/stint
import pkg/chronos
import ../clock
import ./requests
import ./proofs
import ./config
export stint
export ethers except `%`, `%*`, toJson
export erc20 except `%`, `%*`, toJson
export config
export requests
type
Marketplace* = ref object of Contract
Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
Marketplace_SlashPercentageTooHigh* = object of SolidityError
Marketplace_MaximumSlashingTooHigh* = object of SolidityError
Marketplace_InvalidExpiry* = object of SolidityError
Marketplace_InvalidMaxSlotLoss* = object of SolidityError
Marketplace_InsufficientSlots* = object of SolidityError
Marketplace_InvalidClientAddress* = object of SolidityError
Marketplace_RequestAlreadyExists* = object of SolidityError
Marketplace_InvalidSlot* = object of SolidityError
Marketplace_SlotNotFree* = object of SolidityError
Marketplace_InvalidSlotHost* = object of SolidityError
Marketplace_AlreadyPaid* = object of SolidityError
Marketplace_TransferFailed* = object of SolidityError
Marketplace_UnknownRequest* = object of SolidityError
Marketplace_InvalidState* = object of SolidityError
Marketplace_StartNotBeforeExpiry* = object of SolidityError
Marketplace_SlotNotAcceptingProofs* = object of SolidityError
Marketplace_SlotIsFree* = object of SolidityError
Marketplace_ReservationRequired* = object of SolidityError
Marketplace_NothingToWithdraw* = object of SolidityError
Marketplace_InsufficientDuration* = object of SolidityError
Marketplace_InsufficientProofProbability* = object of SolidityError
Marketplace_InsufficientCollateral* = object of SolidityError
Marketplace_InsufficientReward* = object of SolidityError
Marketplace_InvalidCid* = object of SolidityError
Marketplace_DurationExceedsLimit* = object of SolidityError
Proofs_InsufficientBlockHeight* = object of SolidityError
Proofs_InvalidProof* = object of SolidityError
Proofs_ProofAlreadySubmitted* = object of SolidityError
Proofs_PeriodNotEnded* = object of SolidityError
Proofs_ValidationTimedOut* = object of SolidityError
Proofs_ProofNotMissing* = object of SolidityError
Proofs_ProofNotRequired* = object of SolidityError
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
proc currentCollateral*(
marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}
proc requestStorage*(
marketplace: Marketplace, request: StorageRequest
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
Marketplace_InsufficientReward, Marketplace_InvalidCid,
]
.}
proc fillSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc freeSlot*(
marketplace: Marketplace, id: SlotId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc freeSlot*(
marketplace: Marketplace,
id: SlotId,
rewardRecipient: Address,
collateralRecipient: Address,
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc getRequest*(
marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}
proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(
marketplace: Marketplace, id: SlotId
): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}
proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(
marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}
proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc requestExpiry*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(
marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}
proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
proc submitProof*(
marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.
contract,
errors:
[Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
.}
proc markProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
]
.}
proc canMarkProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
Proofs_ProofAlreadyMarkedMissing,
]
.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}
proc canReserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}


@@ -1,46 +0,0 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/contracts/fields
type
Groth16Proof* = object
a*: G1Point
b*: G2Point
c*: G1Point
G1Point* = object
x*: UInt256
y*: UInt256
# A field element F_{p^2} encoded as `real + i * imag`
Fp2Element* = object
real*: UInt256
imag*: UInt256
G2Point* = object
x*: Fp2Element
y*: Fp2Element
func solidityType*(_: type G1Point): string =
solidityType(G1Point.fieldTypes)
func solidityType*(_: type Fp2Element): string =
solidityType(Fp2Element.fieldTypes)
func solidityType*(_: type G2Point): string =
solidityType(G2Point.fieldTypes)
func solidityType*(_: type Groth16Proof): string =
solidityType(Groth16Proof.fieldTypes)
func encode*(encoder: var AbiEncoder, point: G1Point) =
encoder.write(point.fieldValues)
func encode*(encoder: var AbiEncoder, element: Fp2Element) =
encoder.write(element.fieldValues)
func encode*(encoder: var AbiEncoder, point: G2Point) =
encoder.write(point.fieldValues)
func encode*(encoder: var AbiEncoder, proof: Groth16Proof) =
encoder.write(proof.fieldValues)
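# Illustrative note (not part of the original module): with the helpers
# above, a Groth16Proof ABI-encodes as a nested tuple of uint256 values,
# i.e. ((a.x, a.y), ((b.x.real, b.x.imag), (b.y.real, b.y.imag)), (c.x, c.y)),
# so solidityType(Groth16Proof) is expected to yield
# "((uint256,uint256),((uint256,uint256),(uint256,uint256)),(uint256,uint256))".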


@@ -1,123 +0,0 @@
import pkg/ethers/provider
import pkg/chronos
import pkg/questionable
import ../logutils
from ../clock import SecondsSince1970
logScope:
topics = "marketplace onchain provider"
proc raiseProviderError(message: string) {.raises: [ProviderError].} =
raise newException(ProviderError, message)
proc blockNumberAndTimestamp*(
provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
without latestBlock =? await provider.getBlock(blockTag):
raiseProviderError("Could not get latest block")
without latestBlockNumber =? latestBlock.number:
raiseProviderError("Could not get latest block number")
return (latestBlockNumber, latestBlock.timestamp)
proc binarySearchFindClosestBlock(
provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
if abs(lowTimestamp.truncate(int) - epochTime) <
abs(highTimestamp.truncate(int) - epochTime):
return low
else:
return high
proc binarySearchBlockNumberForEpoch(
provider: Provider,
epochTime: UInt256,
latestBlockNumber: UInt256,
earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
var low = earliestBlockNumber
var high = latestBlockNumber
while low <= high:
if low == 0 and high == 0:
return low
let mid = (low + high) div 2
let (midBlockNumber, midBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.init(mid))
if midBlockTimestamp < epochTime:
low = mid + 1
elif midBlockTimestamp > epochTime:
high = mid - 1
else:
return midBlockNumber
# NOTICE that due to how the binary search is implemented, when it
# finishes, low is always greater than high - this is why we use high
# where intuitively we would use low:
await provider.binarySearchFindClosestBlock(
epochTime.truncate(int), low = high, high = low
)
proc blockNumberForEpoch*(
provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let epochTimeUInt256 = epochTime.u256
let (latestBlockNumber, latestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.latest)
let (earliestBlockNumber, earliestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.earliest)
# Initially we used the average block time to predict
# the number of blocks we need to look back in order to find
# the block number corresponding to the given epoch time.
# This estimation can be highly inaccurate if block time
# was changing in the past or is fluctuating and therefore
# we used that information initially only to find out
# if the available history is long enough to perform effective search.
# It turns out we do not have to do that. There is an easier way.
#
# First we check if the given epoch time equals the timestamp of either
# the earliest or the latest block. If it does, we just return the
# block number of that block.
#
# Otherwise, if the earliest available block is not the genesis block,
# we should check the timestamp of that earliest block and if it is greater
# than the epoch time, we should issue a warning and return
# that earliest block number.
# In all other cases - that is, when the earliest block is not the genesis
# block but its timestamp is not greater than the requested epoch time, or
# when the earliest available block is the genesis block
# (which means we have the whole history available) - we should proceed
# with the binary search.
#
# An additional benefit of this method is that we do not have to rely
# on the average block time, which not only makes the whole approach
# more reliable, but also easier to test.
# Are we lucky today?
if earliestBlockTimestamp == epochTimeUInt256:
return earliestBlockNumber
if latestBlockTimestamp == epochTimeUInt256:
return latestBlockNumber
if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
let availableHistoryInDays =
(latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
warn "Short block history detected.",
earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
return earliestBlockNumber
return await provider.binarySearchBlockNumberForEpoch(
epochTimeUInt256, latestBlockNumber, earliestBlockNumber
)
proc pastBlockTag*(
provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
let head = await provider.getBlockNumber()
return BlockTag.init(head - blocksAgo.abs.u256)
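# Illustrative usage sketch (not part of the original module; assumes a
# connected `provider: Provider`, std/times for `getTime`, and an async
# context):
#
#   let dayAgo = getTime().toUnix - 1.days.secs   # epoch time 24h ago
#   let fromBlock = await provider.blockNumberForEpoch(dayAgo)
#   let tag = await provider.pastBlockTag(1000)   # or simply 1000 blocks back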


@@ -1,206 +0,0 @@
import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
from ../errors import mapFailure
export contractabi
type
StorageRequest* = object
client* {.serialize.}: Address
ask* {.serialize.}: StorageAsk
content* {.serialize.}: StorageContent
expiry* {.serialize.}: uint64
nonce*: Nonce
StorageAsk* = object
proofProbability* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
slots* {.serialize.}: uint64
slotSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
maxSlotLoss* {.serialize.}: uint64
StorageContent* = object
cid* {.serialize.}: Cid
merkleRoot*: array[32, byte]
Slot* = object
request* {.serialize.}: StorageRequest
slotIndex* {.serialize.}: uint64
SlotId* = distinct array[32, byte]
RequestId* = distinct array[32, byte]
Nonce* = distinct array[32, byte]
RequestState* {.pure.} = enum
New
Started
Cancelled
Finished
Failed
SlotState* {.pure.} = enum
Free
Filled
Finished
Failed
Paid
Cancelled
Repair
proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}
proc `==`*(x, y: SlotId): bool {.borrow.}
proc hash*(x: SlotId): Hash {.borrow.}
proc hash*(x: Nonce): Hash {.borrow.}
proc hash*(x: Address): Hash {.borrow.}
func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
array[32, byte](id)
proc `$`*(id: RequestId | SlotId | Nonce): string =
id.toArray.toHex
proc fromHex*(T: type RequestId, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*(T: type SlotId, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*(T: type Nonce, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*[T: distinct](_: type T, hex: string): T =
type baseType = T.distinctBase
T baseType.fromHex(hex)
proc toHex*[T: distinct](id: T): string =
type baseType = T.distinctBase
baseType(id).toHex
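# Illustrative sketch (not part of the original module): IDs round-trip
# through the generic distinct hex helpers above.
when isMainModule:
  var bytes: array[32, byte]
  bytes[31] = 42
  let id = RequestId(bytes)
  assert RequestId.fromHex(id.toHex) == id # lowercase hex, no 0x prefix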
logutils.formatIt(LogFormat.textLines, Nonce):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
it.to0xHexLog
func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
StorageRequest(
client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
)
func fromTuple(_: type Slot, tupl: tuple): Slot =
Slot(request: tupl[0], slotIndex: tupl[1])
func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
StorageAsk(
proofProbability: tupl[0],
pricePerBytePerSecond: tupl[1],
collateralPerByte: tupl[2],
slots: tupl[3],
slotSize: tupl[4],
duration: tupl[5],
maxSlotLoss: tupl[6],
)
func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
StorageContent(cid: tupl[0], merkleRoot: tupl[1])
func solidityType*(_: type Cid): string =
solidityType(seq[byte])
func solidityType*(_: type StorageContent): string =
solidityType(StorageContent.fieldTypes)
func solidityType*(_: type StorageAsk): string =
solidityType(StorageAsk.fieldTypes)
func solidityType*(_: type StorageRequest): string =
solidityType(StorageRequest.fieldTypes)
# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
encoder.write(cid.data.buffer)
func encode*(encoder: var AbiEncoder, content: StorageContent) =
encoder.write(content.fieldValues)
func encode*(encoder: var AbiEncoder, ask: StorageAsk) =
encoder.write(ask.fieldValues)
func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
encoder.write(id.toArray)
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
encoder.write(request.fieldValues)
func encode*(encoder: var AbiEncoder, slot: Slot) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
let data = ?decoder.read(seq[byte])
Cid.init(data).mapFailure
func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
let tupl = ?decoder.read(StorageContent.fieldTypes)
success StorageContent.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type StorageAsk): ?!T =
let tupl = ?decoder.read(StorageAsk.fieldTypes)
success StorageAsk.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T =
let tupl = ?decoder.read(StorageRequest.fieldTypes)
success StorageRequest.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
let tupl = ?decoder.read(Slot.fieldTypes)
success Slot.fromTuple(tupl)
func id*(request: StorageRequest): RequestId =
let encoding = AbiEncoder.encode((request,))
RequestId(keccak256.digest(encoding).data)
func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
let encoding = AbiEncoder.encode((requestId, slotIndex))
SlotId(keccak256.digest(encoding).data)
func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
slotId(request.id, slotIndex)
func id*(slot: Slot): SlotId =
slotId(slot.request, slot.slotIndex)
func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
ask.pricePerBytePerSecond * ask.slotSize.u256
func pricePerSlot*(ask: StorageAsk): UInt256 =
ask.duration.u256 * ask.pricePerSlotPerSecond
func totalPrice*(ask: StorageAsk): UInt256 =
ask.slots.u256 * ask.pricePerSlot
func totalPrice*(request: StorageRequest): UInt256 =
request.ask.totalPrice
func collateralPerSlot*(ask: StorageAsk): UInt256 =
ask.collateralPerByte * ask.slotSize.u256
func size*(ask: StorageAsk): uint64 =
ask.slots * ask.slotSize
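# Worked example (illustrative, not part of the original module): the
# pricing helpers above compose multiplicatively. All numbers are arbitrary.
when isMainModule:
  let ask = StorageAsk(
    proofProbability: 1.u256,
    pricePerBytePerSecond: 2.u256,
    collateralPerByte: 1.u256,
    slots: 3,
    slotSize: 1024,
    duration: 60,
  )
  assert ask.pricePerSlotPerSecond == 2048.u256 # 2 * 1024
  assert ask.pricePerSlot == 122880.u256 # 60 * 2048
  assert ask.totalPrice == 368640.u256 # 3 * 122880
  assert ask.collateralPerSlot == 1024.u256 # 1 * 1024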


@@ -1,25 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import ./erasure/erasure
import ./erasure/backends/leopard
export erasure
func leoEncoderProvider*(
size, buffers, parity: int
): EncoderBackend {.raises: [Defect].} =
## create new Leo Encoder
LeoEncoderBackend.new(size, buffers, parity)
func leoDecoderProvider*(
size, buffers, parity: int
): DecoderBackend {.raises: [Defect].} =
## create new Leo Decoder
LeoDecoderBackend.new(size, buffers, parity)


@@ -1,47 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
import ../stores
type
ErasureBackend* = ref object of RootObj
blockSize*: int # block size in bytes
buffers*: int # number of original pieces
parity*: int # number of redundancy pieces
EncoderBackend* = ref object of ErasureBackend
DecoderBackend* = ref object of ErasureBackend
method release*(self: ErasureBackend) {.base, gcsafe.} =
## release the backend
##
raiseAssert("not implemented!")
method encode*(
self: EncoderBackend,
buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## encode buffers using a backend
##
raiseAssert("not implemented!")
method decode*(
self: DecoderBackend,
buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## decode buffers using a backend
##
raiseAssert("not implemented!")


@@ -1,79 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/options
import pkg/leopard
import pkg/results
import ../backend
type
LeoEncoderBackend* = ref object of EncoderBackend
encoder*: Option[LeoEncoder]
LeoDecoderBackend* = ref object of DecoderBackend
decoder*: Option[LeoDecoder]
method encode*(
self: LeoEncoderBackend,
data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] =
## Encode data using the Leopard backend
if parityLen == 0:
return ok()
var encoder =
if self.encoder.isNone:
self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
self.encoder.get()
else:
self.encoder.get()
encoder.encode(data, parity, dataLen, parityLen)
method decode*(
self: LeoDecoderBackend,
data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
## Decode data using the given Leopard backend
var decoder =
if self.decoder.isNone:
self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
self.decoder.get()
else:
self.decoder.get()
decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)
method release*(self: LeoEncoderBackend) =
if self.encoder.isSome:
self.encoder.get().free()
method release*(self: LeoDecoderBackend) =
if self.decoder.isSome:
self.decoder.get().free()
proc new*(
T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
## Create an instance of a Leopard Encoder backend
##
LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
proc new*(
T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
## Create an instance of a Leopard Decoder backend
##
LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)


@@ -1,678 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
import std/[sugar, atomics, sequtils]
import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools
import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils
import pkg/stew/byteutils
import ./backend
export backend
logScope:
topics = "codex erasure"
type
## Encode a manifest into one that is erasure protected.
##
## The new manifest has K `blocks` that are encoded into
## additional M `parity` blocks. The resulting dataset
## is padded with empty blocks if it doesn't have a square
## shape.
##
## NOTE: The padding blocks could be excluded
## from transmission, but they aren't for now.
##
## The resulting dataset is logically divided into rows,
## where each row is made up of B blocks. There are then
## K + M = N rows in total, and all rows are assumed to
## contain the same number (B) of blocks.
##
## The encoding is systematic and the rows can be
## read sequentially by any node without decoding.
##
## Decoding is possible with any K rows or partial K
## columns (with up to M blocks missing per column),
## or any combination thereof.
##
EncoderProvider* =
proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}
DecoderProvider* =
proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}
Erasure* = ref object
taskPool: Taskpool
encoderProvider*: EncoderProvider
decoderProvider*: DecoderProvider
store*: BlockStore
EncodingParams = object
ecK: Natural
ecM: Natural
rounded: Natural
steps: Natural
blocksCount: Natural
strategy: StrategyType
ErasureError* = object of CodexError
InsufficientBlocksError* = object of ErasureError
# Minimum size, in bytes, that the dataset must have had
# for the encoding request to have succeeded with the parameters
# provided.
minSize*: NBytes
EncodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen, parityLen: int
signal: ThreadSignalPtr
DecodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen: int
parityLen, recoveredLen: int
signal: ThreadSignalPtr
func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
## `idx` - the index to convert
## `step` - the current step
## Returns the position in the encoded dataset
##
(idx - step) div steps
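# Worked example (illustrative): with steps = 3, indices are laid out in
# interleaved columns, so for idx = 7 in step 1:
#   indexToPos(3, 7, 1) = (7 - 1) div 3 = 2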
proc getPendingBlocks(
self: Erasure, manifest: Manifest, indicies: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var
# request blocks from the store
pendingBlocks = indicies.map(
(i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
(r: ?!bt.Block) => (r, i)
) # Get the data blocks (first K)
)
proc isFinished(): bool =
pendingBlocks.len == 0
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
let completedFut = await one(pendingBlocks)
if (let i = pendingBlocks.find(completedFut); i >= 0):
pendingBlocks.del(i)
return await completedFut
else:
let (_, index) = await completedFut
raise newException(
CatchableError,
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
$index,
)
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
proc prepareEncodingData(
self: Erasure,
manifest: Manifest,
params: EncodingParams,
step: Natural,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
## Prepare data for encoding
##
let
strategy = params.strategy.init(
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter =
self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
cids[idx] = blk.cid
resolved.inc()
for idx in indicies.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
err:
return failure(err)
cids[idx] = emptyBlockCid
success(resolved.Natural)
proc prepareDecodingData(
self: Erasure,
encoded: Manifest,
step: Natural,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
## `data` - the data to be prepared
## `parityData` - the parityData to be prepared
## `cids` - cids of prepared data
## `emptyBlock` - the empty block to be used for padding
##
let
strategy = encoded.protectedStrategy.init(
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
var
dataPieces = 0
parityPieces = 0
resolved = 0
for fut in pendingBlocksIter:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if resolved >= encoded.ecK:
break
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let pos = indexToPos(encoded.steps, idx, step)
logScope:
cid = blk.cid
idx = idx
pos = pos
step = step
empty = blk.isEmpty
cids[idx] = blk.cid
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(
parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
)
parityPieces.inc
else:
trace "Retrieved data block"
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
dataPieces.inc
resolved.inc
return success (dataPieces.Natural, parityPieces.Natural)
proc init*(
_: type EncodingParams,
manifest: Manifest,
ecK: Natural,
ecM: Natural,
strategy: StrategyType,
): ?!EncodingParams =
if ecK > manifest.blocksCount:
let exc = (ref InsufficientBlocksError)(
msg:
"Unable to encode manifest, not enough blocks, ecK = " & $ecK &
", blocksCount = " & $manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize,
)
return failure(exc)
let
rounded = roundUp(manifest.blocksCount, ecK)
steps = divUp(rounded, ecK)
blocksCount = rounded + (steps * ecM)
success EncodingParams(
ecK: ecK,
ecM: ecM,
rounded: rounded,
steps: steps,
blocksCount: blocksCount,
strategy: strategy,
)
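# Worked example (illustrative, not part of the original module): a manifest
# with 10 blocks encoded with ecK = 4, ecM = 2 yields
#   rounded     = roundUp(10, 4) = 12  # dataset padded up to a multiple of K
#   steps       = divUp(12, 4)   = 3   # number of encoding iterations
#   blocksCount = 12 + 3 * 2     = 18  # padded data blocks plus parity blocks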
proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let encoder =
task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
encoder.release()
discard task[].signal.fireSync()
if (
let res =
encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
res.isErr
):
warn "Error from leopard encoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncEncode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var data = makeUncheckedArray(blocks)
defer:
dealloc(data)
## Create an encode task with block data
var task = EncodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: data,
parity: parity,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding failed")
success()
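# Note (illustrative, not part of the original module): the two-stage wait
# above matters because `task` lives on this proc's stack and the spawned
# thread holds a raw pointer to it. Even if the first wait is interrupted,
# the signal is drained with `noCancel` before returning, so the worker can
# never fire into freed memory.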
proc encodeData(
self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
logScope:
steps = params.steps
rounded_blocks = params.rounded
blocks_count = params.blocksCount
ecK = params.ecK
ecM = params.ecM
var
cids = seq[Cid].new()
emptyBlock = newSeq[byte](manifest.blockSize.int)
cids[].setLen(params.blocksCount)
try:
for step in 0 ..< params.steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
err:
trace "Unable to prepare data", error = err.msg
return failure(err)
trace "Erasure coding data", data = data[].len
try:
if err =? (
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(parity, params.ecM)
var idx = params.rounded + step
for j in 0 ..< params.ecM:
var innerPtr: ptr UncheckedArray[byte] = parity[][j]
without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
error:
trace "Unable to create parity block", err = error.msg
return failure(error)
trace "Adding parity block", cid = blk.cid, idx
cids[idx] = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
idx.inc(params.steps)
without tree =? CodexTree.init(cids[]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
let encodedManifest = Manifest.new(
manifest = manifest,
treeCid = treeCid,
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
ecK = params.ecK,
ecM = params.ecM,
strategy = params.strategy,
)
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
success encodedManifest
except CancelledError as exc:
trace "Erasure coding encoding cancelled"
raise exc # cancellation needs to be propagated
except CatchableError as exc:
trace "Erasure coding encoding error", exc = exc.msg
return failure(exc)
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: Natural,
parity: Natural,
strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
## `manifest` - the original manifest to be encoded
## `blocks` - the number of blocks to be encoded - K
## `parity` - the number of parity blocks to generate - M
##
without params =? EncodingParams.init(manifest, blocks.int, parity.int, strategy), err:
return failure(err)
without encodedManifest =? await self.encodeData(manifest, params), err:
return failure(err)
return success encodedManifest
proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let decoder =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()
if (
let res = decoder.decode(
task[].blocks,
task[].parity,
task[].recovered,
task[].blocksLen,
task[].parityLen,
task[].recoveredLen,
)
res.isErr
):
warn "Error from leopard decoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)
defer:
dealloc(blockData)
dealloc(parityData)
## Create a decode task with block data
var task = DecodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding failed")
success()
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.blocksCount
var
cids = seq[Cid].new()
recoveredIndices = newSeq[Natural]()
decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
cids[].setLen(encoded.blocksCount)
try:
for step in 0 ..< encoded.steps:
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
var
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
without (dataPieces, _) =? (
await self.prepareDecodingData(
encoded, step, data, parityData, cids, emptyBlock
)
), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
if dataPieces >= encoded.ecK:
trace "Retrieved all the required data blocks"
continue
trace "Erasure decoding data"
try:
if err =? (
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(recovered, encoded.ecK)
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
var innerPtr: ptr UncheckedArray[byte] = recovered[][i]
without blk =? bt.Block.new(
innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
), error:
trace "Unable to create block!", exc = error.msg
return failure(error)
trace "Recovered block", cid = blk.cid, index = i
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
trace "Erasure coding decoding cancelled"
raise exc # cancellation needs to be propagated
except CatchableError as exc:
trace "Erasure coding decoding error", exc = exc.msg
return failure(exc)
finally:
decoder.release()
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
let idxIter =
Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
return failure(err)
let decoded = Manifest.new(encoded)
return decoded.success
proc start*(self: Erasure) {.async.} =
return
proc stop*(self: Erasure) {.async.} =
return
proc new*(
T: type Erasure,
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider,
taskPool: Taskpool,
): Erasure =
## Create a new Erasure instance for encoding and decoding manifests
##
Erasure(
store: store,
encoderProvider: encoderProvider,
decoderProvider: decoderProvider,
taskPool: taskPool,
)
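# Illustrative wiring sketch (not part of the original module; assumes a
# `store: BlockStore`, a `manifest: Manifest`, and the Leopard providers
# re-exported by the top-level erasure module). The task pool needs more
# than one thread, per the doAssert guards above:
#
#   let erasure = Erasure.new(
#     store, leoEncoderProvider, leoDecoderProvider, Taskpool.new(numThreads = 2)
#   )
#   let protected = (await erasure.encode(manifest, blocks = 4, parity = 2)).tryGet()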


@@ -1,100 +0,0 @@
import ./errors
import ./utils
import ./utils/asynciter
{.push raises: [].}
type
StrategyType* = enum
# Simplest approach:
# 0 => 0, 1, 2
# 1 => 3, 4, 5
# 2 => 6, 7, 8
LinearStrategy
# Stepped indexing:
# 0 => 0, 3, 6
# 1 => 1, 4, 7
# 2 => 2, 5, 8
SteppedStrategy
# Represents a strategy for grouping indices (of blocks, usually).
# Given an iteration count as input, it will produce a seq of
# selected indices.
IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError
IndexingStrategy* = object
strategyType*: StrategyType
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
step*: int
func checkIteration(
self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
if iteration >= self.iterations:
raise newException(
IndexingError, "Indexing iteration can't be greater than or equal to iterations."
)
func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}:
Iter[int].new(first, last, step)
func getLinearIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let
first = self.firstIndex + iteration * self.step
last = min(first + self.step - 1, self.lastIndex)
getIter(first, last, 1)
func getSteppedIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let
first = self.firstIndex + iteration
last = self.lastIndex
getIter(first, last, self.iterations)
func getIndicies*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
case self.strategyType
of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration)
of StrategyType.SteppedStrategy:
self.getSteppedIndicies(iteration)
func init*(
strategy: StrategyType, firstIndex, lastIndex, iterations: int
): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex:
raise newException(
IndexingWrongIndexError,
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
")",
)
if iterations <= 0:
raise newException(
IndexingWrongIterationsError,
"iterations (" & $iterations & ") must be greater than zero.",
)
IndexingStrategy(
strategyType: strategy,
firstIndex: firstIndex,
lastIndex: lastIndex,
iterations: iterations,
step: divUp((lastIndex - firstIndex + 1), iterations),
)
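# Hedged usage sketch (not part of the original file): splitting indices
# 0..8 into 3 iterations, matching the StrategyType comments above.
#
#   let linear = init(LinearStrategy, firstIndex = 0, lastIndex = 8, iterations = 3)
#   # linear.getIndicies(1) yields 3, 4, 5 (step = divUp(9, 3) = 3)
#
#   let stepped = init(SteppedStrategy, firstIndex = 0, lastIndex = 8, iterations = 3)
#   # stepped.getIndicies(1) yields 1, 4, 7 (stride = iterations = 3)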


@@ -1,4 +0,0 @@
import ./manifest/coders
import ./manifest/manifest
export manifest, coders


@@ -1,260 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
# This module implements serialization and deserialization of Manifest
import pkg/upraises
import times
push:
{.upraises: [].}
import std/tables
import std/sequtils
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import pkg/chronos
import ./manifest
import ../errors
import ../blocktype
import ../logutils
import ../indexingstrategy
proc encode*(manifest: Manifest): ?!seq[byte] =
## Encode the manifest into a ``ManifestCodec``
## multicodec container (Dag-pb) for now
##
?manifest.verify()
var pbNode = initProtoBuffer()
# NOTE: The `Data` field in the `dag-pb`
# contains the following protobuf `Message`
#
# ```protobuf
# Message VerificationInfo {
#   bytes verifyRoot = 1;                    # Decimal encoded field-element
#   repeated bytes slotRoots = 2;            # Decimal encoded field-elements
#   optional uint32 cellSize = 3;            # size of a slot cell
#   optional uint32 verifiableStrategy = 4;  # indexing strategy for slot roots
# }
# Message ErasureInfo {
#   optional uint32 ecK = 1;                 # number of encoded blocks
#   optional uint32 ecM = 2;                 # number of parity blocks
#   optional bytes originalTreeCid = 3;      # cid of the original dataset
#   optional uint64 originalDatasetSize = 4; # size of the original dataset
#   optional uint32 protectedStrategy = 5;   # indexing strategy used for protection
#   optional VerificationInfo verification = 6; # verification information
# }
#
# Message Header {
#   optional bytes treeCid = 1;        # cid (root) of the tree
#   optional uint32 blockSize = 2;     # size of a single block
#   optional uint64 datasetSize = 3;   # size of the dataset
#   optional uint32 codec = 4;         # Dataset codec (MultiCodec)
#   optional uint32 hcodec = 5;        # Multihash codec (MultiCodec)
#   optional uint32 version = 6;       # Cid version (CidVersion)
#   optional ErasureInfo erasure = 7;  # erasure coding info
#   optional string filename = 8;      # original filename
#   optional string mimetype = 9;      # original mimetype
# }
# ```
#
# var treeRootVBuf = initVBuffer()
var header = initProtoBuffer()
header.write(1, manifest.treeCid.data.buffer)
header.write(2, manifest.blockSize.uint32)
header.write(3, manifest.datasetSize.uint64)
header.write(4, manifest.codec.uint32)
header.write(5, manifest.hcodec.uint32)
header.write(6, manifest.version.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
erasureInfo.write(2, manifest.ecM.uint32)
erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
erasureInfo.write(4, manifest.originalDatasetSize.uint64)
erasureInfo.write(5, manifest.protectedStrategy.uint32)
if manifest.verifiable:
var verificationInfo = initProtoBuffer()
verificationInfo.write(1, manifest.verifyRoot.data.buffer)
for slotRoot in manifest.slotRoots:
verificationInfo.write(2, slotRoot.data.buffer)
verificationInfo.write(3, manifest.cellSize.uint32)
verificationInfo.write(4, manifest.verifiableStrategy.uint32)
erasureInfo.write(6, verificationInfo)
erasureInfo.finish()
header.write(7, erasureInfo)
if manifest.filename.isSome:
header.write(8, manifest.filename.get())
if manifest.mimetype.isSome:
header.write(9, manifest.mimetype.get())
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
return pbNode.buffer.success
proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
## Decode a manifest from a data blob
##
var
pbNode = initProtoBuffer(data)
pbHeader: ProtoBuffer
pbErasureInfo: ProtoBuffer
pbVerificationInfo: ProtoBuffer
treeCidBuf: seq[byte]
originalTreeCid: seq[byte]
datasetSize: uint64
codec: uint32
hcodec: uint32
version: uint32
blockSize: uint32
originalDatasetSize: uint64
ecK, ecM: uint32
protectedStrategy: uint32
verifyRoot: seq[byte]
slotRoots: seq[seq[byte]]
cellSize: uint32
verifiableStrategy: uint32
filename: string
mimetype: string
# Decode `Header` message
if pbNode.getField(1, pbHeader).isErr:
return failure("Unable to decode `Header` from dag-pb manifest!")
# Decode `Header` contents
if pbHeader.getField(1, treeCidBuf).isErr:
return failure("Unable to decode `treeCid` from manifest!")
if pbHeader.getField(2, blockSize).isErr:
return failure("Unable to decode `blockSize` from manifest!")
if pbHeader.getField(3, datasetSize).isErr:
return failure("Unable to decode `datasetSize` from manifest!")
if pbHeader.getField(4, codec).isErr:
return failure("Unable to decode `codec` from manifest!")
if pbHeader.getField(5, hcodec).isErr:
return failure("Unable to decode `hcodec` from manifest!")
if pbHeader.getField(6, version).isErr:
return failure("Unable to decode `version` from manifest!")
if pbHeader.getField(7, pbErasureInfo).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
if pbHeader.getField(8, filename).isErr:
return failure("Unable to decode `filename` from manifest!")
if pbHeader.getField(9, mimetype).isErr:
return failure("Unable to decode `mimetype` from manifest!")
let protected = pbErasureInfo.buffer.len > 0
var verifiable = false
if protected:
if pbErasureInfo.getField(1, ecK).isErr:
return failure("Unable to decode `K` from manifest!")
if pbErasureInfo.getField(2, ecM).isErr:
return failure("Unable to decode `M` from manifest!")
if pbErasureInfo.getField(3, originalTreeCid).isErr:
return failure("Unable to decode `originalTreeCid` from manifest!")
if pbErasureInfo.getField(4, originalDatasetSize).isErr:
return failure("Unable to decode `originalDatasetSize` from manifest!")
if pbErasureInfo.getField(5, protectedStrategy).isErr:
return failure("Unable to decode `protectedStrategy` from manifest!")
if pbErasureInfo.getField(6, pbVerificationInfo).isErr:
return failure("Unable to decode `verificationInfo` from manifest!")
verifiable = pbVerificationInfo.buffer.len > 0
if verifiable:
if pbVerificationInfo.getField(1, verifyRoot).isErr:
return failure("Unable to decode `verifyRoot` from manifest!")
if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr:
return failure("Unable to decode `slotRoots` from manifest!")
if pbVerificationInfo.getField(3, cellSize).isErr:
return failure("Unable to decode `cellSize` from manifest!")
if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
return failure("Unable to decode `verifiableStrategy` from manifest!")
let treeCid = ?Cid.init(treeCidBuf).mapFailure
var filenameOption = if filename.len == 0: string.none else: filename.some
var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
let self =
if protected:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
ecK = ecK.int,
ecM = ecM.int,
originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes,
strategy = StrategyType(protectedStrategy),
filename = filenameOption,
mimetype = mimetypeOption,
)
else:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
)
?self.verify()
if verifiable:
let
verifyRootCid = ?Cid.init(verifyRoot).mapFailure
slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)
return Manifest.new(
manifest = self,
verifyRoot = verifyRootCid,
slotRoots = slotRootCids,
cellSize = cellSize.NBytes,
strategy = StrategyType(verifiableStrategy),
)
self.success
func decode*(_: type Manifest, blk: Block): ?!Manifest =
## Decode a manifest using `decoder`
##
if not ?blk.cid.isManifest:
return failure "Cid not a manifest codec"
Manifest.decode(blk.data)
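# Hedged round-trip sketch (not part of the original file): an encoded
# manifest should decode back to an equal value; `treeCid` stands in for
# a real dataset tree root.
proc roundTrip(treeCid: Cid): ?!bool =
  let manifest = Manifest.new(
    treeCid = treeCid, blockSize = 65536.NBytes, datasetSize = 1048576.NBytes
  )
  let encoded = ?manifest.encode()
  let decoded = ?Manifest.decode(encoded)
  success(decoded == manifest)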


@@ -1,368 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
# This module defines all operations on Manifest
import pkg/upraises
push:
{.upraises: [].}
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]
import pkg/questionable/results
import ../errors
import ../utils
import ../utils/json
import ../units
import ../blocktype
import ../indexingstrategy
import ../logutils
# TODO: Manifest should be reworked to more concrete types,
# perhaps using inheritance
type Manifest* = ref object of RootObj
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes
# Size of each contained block (might not be needed if blocks are len-prefixed)
codec: MultiCodec # Dataset codec
hcodec: MultiCodec # Multihash codec
version: CidVersion # Cid version
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
case protected {.serialize.}: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
case verifiable {.serialize.}: bool
# Verifiable datasets can be used to generate storage proofs
of true:
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
cellSize: NBytes # Size of each slot cell
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
else:
discard
else:
discard
############################################################
# Accessors
############################################################
func blockSize*(self: Manifest): NBytes =
self.blockSize
func datasetSize*(self: Manifest): NBytes =
self.datasetSize
func version*(self: Manifest): CidVersion =
self.version
func hcodec*(self: Manifest): MultiCodec =
self.hcodec
func codec*(self: Manifest): MultiCodec =
self.codec
func protected*(self: Manifest): bool =
self.protected
func ecK*(self: Manifest): int =
self.ecK
func ecM*(self: Manifest): int =
self.ecM
func originalTreeCid*(self: Manifest): Cid =
self.originalTreeCid
func originalBlocksCount*(self: Manifest): int =
divUp(self.originalDatasetSize.int, self.blockSize.int)
func originalDatasetSize*(self: Manifest): NBytes =
self.originalDatasetSize
func treeCid*(self: Manifest): Cid =
self.treeCid
func blocksCount*(self: Manifest): int =
divUp(self.datasetSize.int, self.blockSize.int)
func verifiable*(self: Manifest): bool =
bool (self.protected and self.verifiable)
func verifyRoot*(self: Manifest): Cid =
self.verifyRoot
func slotRoots*(self: Manifest): seq[Cid] =
self.slotRoots
func numSlots*(self: Manifest): int =
self.ecK + self.ecM
func cellSize*(self: Manifest): NBytes =
self.cellSize
func protectedStrategy*(self: Manifest): StrategyType =
self.protectedStrategy
func verifiableStrategy*(self: Manifest): StrategyType =
self.verifiableStrategy
func numSlotBlocks*(self: Manifest): int =
divUp(self.blocksCount, self.numSlots)
func filename*(self: Manifest): ?string =
self.filename
func mimetype*(self: Manifest): ?string =
self.mimetype
############################################################
# Operations on block list
############################################################
func isManifest*(cid: Cid): ?!bool =
success (ManifestCodec == ?cid.contentType().mapFailure(CodexError))
func isManifest*(mc: MultiCodec): ?!bool =
success mc == ManifestCodec
############################################################
# Various sizes and verification
############################################################
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalBlocksCount, self.ecK)
func steps*(self: Manifest): int =
## Number of EC groups in *protected* manifest
divUp(self.rounded, self.ecK)
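# Worked example (not part of the original file): with
# originalBlocksCount = 10 and ecK = 4, rounded = roundUp(10, 4) = 12 and
# steps = divUp(12, 4) = 3, so a protected dataset with ecM = 2 has
# blocksCount = 3 * (4 + 2) = 18, which is exactly what verify() checks below.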
func verify*(self: Manifest): ?!void =
## Check manifest correctness
##
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return
failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
func `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
(a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
(a.mimetype == b.mimetype) and (
if a.protected:
(a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize) and
(a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
(
if a.verifiable:
(a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and (
a.verifiableStrategy == b.verifiableStrategy
)
else:
true
)
else:
true
)
func `$`*(self: Manifest): string =
result =
"treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
$self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
", codec: " & $self.codec & ", protected: " & $self.protected
if self.filename.isSome:
result &= ", filename: " & $self.filename
if self.mimetype.isSome:
result &= ", mimetype: " & $self.mimetype
result &= (
if self.protected:
", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
$self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable & (
if self.verifiable:
", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
else:
""
)
else:
""
)
return result
############################################################
# Constructors
############################################################
func new*(
T: type Manifest,
treeCid: Cid,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = Sha256HashCodec,
codec = BlockCodec,
protected = false,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
T(
treeCid: treeCid,
blockSize: blockSize,
datasetSize: datasetSize,
version: version,
codec: codec,
hcodec: hcodec,
protected: protected,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int,
strategy = SteppedStrategy,
): Manifest =
## Create an erasure protected dataset from an
## unprotected one
##
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK,
ecM: ecM,
originalTreeCid: manifest.treeCid,
originalDatasetSize: manifest.datasetSize,
protectedStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(T: type Manifest, manifest: Manifest): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
Manifest(
treeCid: manifest.originalTreeCid,
datasetSize: manifest.originalDatasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: false,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(
T: type Manifest,
treeCid: Cid,
datasetSize: NBytes,
blockSize: NBytes,
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes,
strategy = SteppedStrategy,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
blockSize: blockSize,
version: version,
hcodec: hcodec,
codec: codec,
protected: true,
ecK: ecK,
ecM: ecM,
originalTreeCid: originalTreeCid,
originalDatasetSize: originalDatasetSize,
protectedStrategy: strategy,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
manifest: Manifest,
verifyRoot: Cid,
slotRoots: openArray[Cid],
cellSize = DefaultCellSize,
strategy = LinearStrategy,
): ?!Manifest =
## Create a verifiable dataset from an
## protected one
##
if not manifest.protected:
return failure newException(
CodexError, "Can create verifiable manifest only from protected manifest."
)
if slotRoots.len != manifest.numSlots:
return failure newException(CodexError, "Wrong number of slot roots.")
success Manifest(
treeCid: manifest.treeCid,
datasetSize: manifest.datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: manifest.ecK,
ecM: manifest.ecM,
originalTreeCid: manifest.originalTreeCid,
originalDatasetSize: manifest.originalDatasetSize,
protectedStrategy: manifest.protectedStrategy,
verifiable: true,
verifyRoot: verifyRoot,
slotRoots: @slotRoots,
cellSize: cellSize,
verifiableStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
## Create a manifest instance from given data
##
Manifest.decode(data)
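# Hedged usage sketch (not part of the original file): protecting a
# manifest and recovering the unprotected view again. `protectedTreeCid`
# and `protectedSize` are placeholders normally supplied by the erasure coder.
proc protectAndRestore(
    unprotected: Manifest, protectedTreeCid: Cid, protectedSize: NBytes
): Manifest =
  let protected = Manifest.new(
    manifest = unprotected,
    treeCid = protectedTreeCid,
    datasetSize = protectedSize,
    ecK = 4,
    ecM = 2,
  )
  # the unprotected-from-protected constructor strips the erasure info
  Manifest.new(protected)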


@@ -1,314 +0,0 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
import ./contracts/proofs
import ./clock
import ./errors
import ./periods
export chronos
export questionable
export requests
export proofs
export SecondsSince1970
export periods
type
Market* = ref object of RootObj
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
MarketplaceEvent* = Event
StorageRequested* = object of MarketplaceEvent
requestId*: RequestId
ask*: StorageAsk
expiry*: uint64
SlotFilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotFreed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotReservationsFull* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
RequestFulfilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestCancelled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestFailed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
ProofSubmitted* = object of MarketplaceEvent
id*: SlotId
method loadConfig*(
market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method getZkeyHash*(
market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getSigner*(
market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method periodicity*(
market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method proofTimeout*(
market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method repairRewardPercentage*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
raiseAssert("not implemented")
method proofDowntime*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
raiseAssert("not implemented")
proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let downtime = await market.proofDowntime
let pntr = await market.getPointer(slotId)
return pntr < downtime
method requestStorage*(
market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
raiseAssert("not implemented")
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
raiseAssert("not implemented")
method getRequest*(
market: Market, id: RequestId
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method requestState*(
market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")
method slotState*(
market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getRequestEnd*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method requestExpiresAt*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method getHost*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method currentCollateral*(
market: Market, slotId: SlotId
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
raiseAssert("not implemented")
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
raiseAssert("not implemented")
method fillSlot*(
market: Market,
requestId: RequestId,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method freeSlot*(
market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method withdrawFunds*(
market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method subscribeRequests*(
market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method getChallenge*(
market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")
method submitProof*(
market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method markProofAsMissing*(
market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canMarkProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method reserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canReserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(
market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(
market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(
market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(
market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFreed*(
market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotReservationsFull*(
market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(
market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(
market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(
market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(
market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeProofSubmission*(
market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
raiseAssert("not implemented")
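# Hedged sketch (not part of the original file): concrete markets override
# the base methods above; a minimal in-memory stub for tests might start
# like this.
type MockMarket* = ref object of Market
  signer: Address

method getSigner*(
    market: MockMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
  return market.signer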


@@ -1,10 +0,0 @@
import ./merkletree/merkletree
import ./merkletree/codex
import ./merkletree/poseidon2
export codex, poseidon2, merkletree
type
SomeMerkleTree* = ByteTree | CodexTree | Poseidon2Tree
SomeMerkleProof* = ByteProof | CodexProof | Poseidon2Proof
SomeMerkleHash* = ByteHash | Poseidon2Hash


@@ -1,4 +0,0 @@
import ./codex/codex
import ./codex/coders
export codex, coders


@@ -1,243 +0,0 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/bitops
import std/sequtils
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/constantine/hashes
import ../../utils
import ../../rng
import ../../errors
import ../../blocktype
from ../../utils/digest import digestBytes
import ../merkletree
export merkletree
logScope:
topics = "codex merkletree"
type
ByteTreeKey* {.pure.} = enum
KeyNone = 0x0.byte
KeyBottomLayer = 0x1.byte
KeyOdd = 0x2.byte
KeyOddAndBottomLayer = 0x3.byte
ByteHash* = seq[byte]
ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
ByteProof* = MerkleProof[ByteHash, ByteTreeKey]
CodexTree* = ref object of ByteTree
mcodec*: MultiCodec
CodexProof* = ref object of ByteProof
mcodec*: MultiCodec
# CodeHashes is not exported from libp2p,
# so we need to recreate it here
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Size of the digest produced by the tree's hash codec
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var proof = CodexProof(mcodec: self.mcodec)
?self.getProof(index, proof)
success proof
func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
## Verify hash
##
let
rootBytes = root.digestBytes
leafBytes = leaf.digestBytes
if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec:
return failure "Hash codec mismatch"
if rootBytes.len != root.size or leafBytes.len != leaf.size:
return failure "Invalid hash length"
self.verify(leafBytes, rootBytes)
func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
self.verify(?leaf.mhash.mapFailure, ?root.mhash.mapFailure)
proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid =
if (?self.root).len == 0:
return failure "Empty root"
let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure
Cid.init(version, dataCodec, mhash).mapFailure
func getLeafCid*(
self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec
): ?!Cid =
if i >= self.leavesCount:
return failure "Invalid leaf index " & $i
let
leaf = self.leaves[i]
mhash = ?MultiHash.init($self.mcodec, leaf).mapFailure
Cid.init(version, dataCodec, mhash).mapFailure
proc `$`*(self: CodexTree): string =
let root =
if self.root.isOk:
byteutils.toHex(self.root.get)
else:
"none"
"CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " &
$self.levels & ", mcodec: " & $self.mcodec & " )"
proc `$`*(self: CodexProof): string =
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
## Compress two hashes
##
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/codex-storage/nim-codex/issues/1162
let input = @x & @y & @[key.byte]
var digest = hashes.sha256.hash(input)
success @digest
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
if mhash.size != leaves[0].len:
return failure "Invalid hash length"
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = leaves[0].mcodec
leaves = leaves.mapIt(it.digestBytes)
CodexTree.init(mcodec, leaves)
func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = (?leaves[0].mhash.mapFailure).mcodec
leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes)
CodexTree.init(mcodec, leaves)
proc fromNodes*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
nodes: openArray[ByteHash],
nleaves: int,
): ?!CodexTree =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
if mhash.size != nodes[0].len:
return failure "Invalid hash length"
var
self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ?self.getProof(index)
if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
return failure "Unable to verify tree built from nodes"
success self
func init*(
_: type CodexProof,
mcodec: MultiCodec = Sha256HashCodec,
index: int,
nleaves: int,
nodes: openArray[ByteHash],
): ?!CodexProof =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
success CodexProof(
compress: compressor,
zero: Zero,
mcodec: mcodec,
index: index,
nleaves: nleaves,
path: @nodes,
)
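# Hedged usage sketch (not part of the original file): build a SHA256-based
# CodexTree from raw 32-byte leaf digests and check a proof for leaf 0.
proc proveFirstLeaf(leaves: seq[ByteHash]): ?!bool =
  let
    tree = ?CodexTree.init(Sha256HashCodec, leaves)
    proof = ?tree.getProof(0)
  proof.verify(leaves[0], ?tree.root)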


@@ -1,153 +0,0 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/bitops
import pkg/questionable/results
import ../errors
type
CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
MerkleTree*[H, K] = ref object of RootObj
layers*: seq[seq[H]]
compress*: CompressFn[H, K]
zero*: H
MerkleProof*[H, K] = ref object of RootObj
index*: int # linear index of the leaf, starting from 0
path*: seq[H] # order: from the bottom to the top
nleaves*: int # number of leaves in the tree (=size of input)
compress*: CompressFn[H, K] # compress function
zero*: H # zero value
func depth*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len - 1
func leavesCount*[H, K](self: MerkleTree[H, K]): int =
return self.layers[0].len
func levels*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] =
return self.layers[0]
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] =
for layer in self.layers:
yield layer
iterator nodes*[H, K](self: MerkleTree[H, K]): H =
for layer in self.layers:
for node in layer:
yield node
func root*[H, K](self: MerkleTree[H, K]): ?!H =
let last = self.layers[^1]
if last.len != 1:
return failure "invalid tree"
return success last[0]
func getProof*[H, K](
self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
): ?!void =
let depth = self.depth
let nleaves = self.leavesCount
if not (index >= 0 and index < nleaves):
return failure "index out of bounds"
var path: seq[H] = newSeq[H](depth)
var k = index
var m = nleaves
for i in 0 ..< depth:
let j = k xor 1
path[i] =
if (j < m):
self.layers[i][j]
else:
self.zero
k = k shr 1
m = (m + 1) shr 1
proof.index = index
proof.path = path
proof.nleaves = nleaves
proof.compress = self.compress
success()
func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
var proof = MerkleProof[H, K]()
?self.getProof(index, proof)
success proof
func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
var
m = proof.nleaves
j = proof.index
h = leaf
bottomFlag = K.KeyBottomLayer
for p in proof.path:
let oddIndex: bool = (bitand(j, 1) != 0)
if oddIndex:
# the index of the child is odd, so the node itself can't be odd (a bit counterintuitive, yeah :)
h = ?proof.compress(p, h, bottomFlag)
else:
if j == m - 1:
# single child => odd node
h = ?proof.compress(h, p, K(bottomFlag.ord + 2))
else:
# even node
h = ?proof.compress(h, p, bottomFlag)
bottomFlag = K.KeyNone
j = j shr 1
m = (m + 1) shr 1
return success h
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
success bool(root == ?proof.reconstructRoot(leaf))
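# Worked example (not part of the original file): for nleaves = 3 and
# index = 2, the first step hits the j == m - 1 case (a lone right-edge
# leaf), so the leaf is folded with KeyOddAndBottomLayer; the next step
# has an odd index and compresses (sibling, h) with KeyNone. This mirrors
# the key schedule merkleTreeWorker uses below when building the tree.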
func merkleTreeWorker*[H, K](
self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
): ?!seq[seq[H]] =
let a = low(xs)
let b = high(xs)
let m = b - a + 1
when not isBottomLayer:
if m == 1:
return success @[@xs]
let halfn: int = m div 2
let n: int = 2 * halfn
let isOdd: bool = (n != m)
var ys: seq[H]
if not isOdd:
ys = newSeq[H](halfn)
else:
ys = newSeq[H](halfn + 1)
for i in 0 ..< halfn:
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key)
if isOdd:
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
ys[halfn] = ?self.compress(xs[n], self.zero, key = key)
success @[@xs] & ?self.merkleTreeWorker(ys, isBottomLayer = false)


@@ -1,130 +0,0 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sequtils
import pkg/poseidon2
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results
import ../utils
import ../rng
import ./merkletree
export merkletree, poseidon2
const
KeyNoneF = F.fromHex("0x0")
KeyBottomLayerF = F.fromHex("0x1")
KeyOddF = F.fromHex("0x2")
KeyOddAndBottomLayerF = F.fromHex("0x3")
Poseidon2Zero* = zero
type
Bn254Fr* = F
Poseidon2Hash* = Bn254Fr
PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
KeyNone
KeyBottomLayer
KeyOdd
KeyOddAndBottomLayer
Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]
proc `$`*(self: Poseidon2Tree): string =
let root = if self.root.isOk: self.root.get.toHex else: "none"
"Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels & " )"
proc `$`*(self: Poseidon2Proof): string =
"Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
", path: " & $self.path.mapIt(it.toHex) & " )"
func toArray32*(bytes: openArray[byte]): array[32, byte] =
result[0 ..< bytes.len] = bytes[0 ..< bytes.len]
converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
case key
of KeyNone: KeyNoneF
of KeyBottomLayer: KeyBottomLayerF
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
if leaves.len == 0:
return failure "Empty leaves"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))
proc fromNodes*(
_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
if nodes.len == 0:
return failure "Empty nodes"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
self = Poseidon2Tree(compress: compressor, zero: zero)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ?self.getProof(index)
if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
return failure "Unable to verify tree built from nodes"
success self
func init*(
_: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
if nodes.len == 0:
return failure "Empty nodes"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
success Poseidon2Proof(
compress: compressor,
zero: Poseidon2Zero,
index: index,
nleaves: nleaves,
path: @nodes,
)
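# Hedged usage sketch (not part of the original file): build a Poseidon2Tree
# over field elements and verify a membership proof for leaf 1 (assumes at
# least two leaves; getProof fails gracefully otherwise).
proc provePoseidonLeaf(leaves: seq[Poseidon2Hash]): ?!bool =
  let
    tree = ?Poseidon2Tree.init(leaves)
    proof = ?tree.getProof(1)
  proof.verify(leaves[1], ?tree.root)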


@@ -1,25 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
const
# Namespaces
CodexMetaNamespace* = "meta" # meta info stored here
CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
CodexBlockTotalNamespace* = CodexMetaNamespace & "/total"
# number of blocks in the repo
CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
CodexBlocksTtlNamespace* = # Cid TTL
CodexMetaNamespace & "/ttl"
CodexBlockProofNamespace* = # Cid and Proof
CodexMetaNamespace & "/proof"
CodexDhtNamespace* = "dht" # Dht namespace
CodexDhtProvidersNamespace* = # Dht providers namespace
CodexDhtNamespace & "/providers"
CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
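# Fully resolved values (not part of the original file), for reference:
#   CodexBlocksNamespace     == "repo/blocks"
#   CodexManifestNamespace   == "repo/manifests"
#   CodexBlocksTtlNamespace  == "meta/ttl"
#   CodexBlockProofNamespace == "meta/proof"
#   CodexQuotaNamespace      == "meta/quota"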


@@ -1,893 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/options
import std/sequtils
import std/strformat
import std/sugar
import times
import pkg/taskpools
import pkg/questionable
import pkg/questionable/results
import pkg/chronos
import pkg/poseidon2
import pkg/libp2p/[switch, multicodec, multihash]
import pkg/libp2p/stream/bufferstream
# TODO: remove once exported by libp2p
import pkg/libp2p/routing_record
import pkg/libp2p/signed_envelope
import ./chunker
import ./slots
import ./clock
import ./blocktype as bt
import ./manifest
import ./merkletree
import ./stores
import ./blockexchange
import ./streams
import ./erasure
import ./discovery
import ./contracts
import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
import ./utils/asynciter
import ./utils/trackedfutures
export logutils
logScope:
topics = "codex node"
const DefaultFetchBatch = 10
type
Contracts* =
tuple[
client: ?ClientInteractions,
host: ?HostInteractions,
validator: ?ValidatorInteractions,
]
CodexNode* = object
switch: Switch
networkId: PeerId
networkStore: NetworkStore
engine: BlockExcEngine
prover: ?Prover
discovery: Discovery
contracts*: Contracts
clock*: Clock
storage*: Contracts
taskpool: Taskpool
trackedFutures: TrackedFutures
CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
func switch*(self: CodexNodeRef): Switch =
return self.switch
func blockStore*(self: CodexNodeRef): BlockStore =
return self.networkStore
func engine*(self: CodexNodeRef): BlockExcEngine =
return self.engine
func discovery*(self: CodexNodeRef): Discovery =
return self.discovery
proc storeManifest*(
self: CodexNodeRef, manifest: Manifest
): Future[?!bt.Block] {.async.} =
without encodedVerifiable =? manifest.encode(), err:
trace "Unable to encode manifest"
return failure(err)
without blk =? bt.Block.new(data = encodedVerifiable, codec = ManifestCodec), error:
trace "Unable to create block from manifest"
return failure(error)
if err =? (await self.networkStore.putBlock(blk)).errorOption:
trace "Unable to store manifest block", cid = blk.cid, err = err.msg
return failure(err)
success blk
proc fetchManifest*(
self: CodexNodeRef, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
## Fetch and decode a manifest block
##
if err =? cid.isManifest.errorOption:
return failure "CID has invalid content type for manifest {$cid}"
trace "Retrieving manifest for cid", cid
without blk =? await self.networkStore.getBlock(BlockAddress.init(cid)), err:
trace "Error retrieve manifest block", cid, err = err.msg
return failure err
trace "Decoding manifest for cid", cid
without manifest =? Manifest.decode(blk), err:
trace "Unable to decode as manifest", err = err.msg
return failure("Unable to decode as manifest")
trace "Decoded manifest", cid
return manifest.success
proc findPeer*(self: CodexNodeRef, peerId: PeerId): Future[?PeerRecord] {.async.} =
## Find peer using the discovery service from the given CodexNode
##
return await self.discovery.findPeer(peerId)
proc connect*(
self: CodexNodeRef, peerId: PeerId, addrs: seq[MultiAddress]
): Future[void] =
self.switch.connect(peerId, addrs)
proc updateExpiry*(
self: CodexNodeRef, manifestCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} =
without manifest =? await self.fetchManifest(manifestCid), error:
trace "Unable to fetch manifest for cid", manifestCid
return failure(error)
try:
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
)
let res = await allFinishedFailed[?!void](ensuringFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)
return success()
proc fetchBatched*(
self: CodexNodeRef,
cid: Cid,
iter: Iter[int],
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raises: [CancelledError]), gcsafe.} =
## Fetch blocks in batches of `batchSize`
##
# TODO: doesn't work if callee is annotated with async
# let
# iter = iter.map(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
while not iter.finished:
let blockFutures = collect:
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
if blockFutures.len == 0:
continue
without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err:
trace "Some blocks failed to fetch", err = err.msg
return failure(err)
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value)
let numOfFailedBlocks = blockResults.len - blocks.len
if numOfFailedBlocks > 0:
return
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")")
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption:
return failure(batchErr)
if not iter.finished:
await sleepAsync(1.millis)
success()
proc fetchBatched*(
self: CodexNodeRef,
manifest: Manifest,
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
## Fetch manifest in batches of `batchSize`
##
trace "Fetching blocks in batches of",
size = batchSize, blocksCount = manifest.blocksCount
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
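# Hedged usage sketch (not part of the original file): prefetch a whole
# dataset in batches, reporting progress from the batch callback.
proc prefetch(
    self: CodexNodeRef, manifest: Manifest
): Future[?!void] {.async: (raises: [CancelledError]).} =
  proc onBatch(
      blocks: seq[bt.Block]
  ): Future[?!void] {.async: (raises: [CancelledError]).} =
    trace "Prefetched batch", count = blocks.len
    return success()

  return await self.fetchBatched(manifest, onBatch = onBatch)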
proc fetchDatasetAsync*(
self: CodexNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc streamSingleBlock(
self: CodexNodeRef, cid: Cid
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of a single block.
##
trace "Streaming single block", cid = cid
let stream = BufferStream.new()
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try:
defer:
await stream.pushEof()
await stream.pushData(blk.data)
except CancelledError as exc:
trace "Streaming block cancelled", cid, exc = exc.msg
except LPStreamError as exc:
trace "Unable to send block", cid, exc = exc.msg
self.trackedFutures.track(streamOneBlock())
LPStream(stream).success
proc streamEntireDataset(
self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of the entire dataset described by the manifest.
##
trace "Retrieving blocks from manifest", manifestCid
var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
if manifest.protected:
# Retrieve, decode and save all EC groups to the local store
proc erasureJob(): Future[void] {.async: (raises: []).} =
try:
# Spawn an erasure decoding job
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CatchableError as exc:
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
jobs.add(erasureJob())
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async: (raises: []).} =
try:
await stream.join()
except CancelledError as exc:
warn "Stream cancelled", exc = exc.msg
finally:
await noCancel allFutures(jobs.mapIt(it.cancelAndWait))
self.trackedFutures.track(monitorStream())
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid
stream.success
proc retrieve*(
self: CodexNodeRef, cid: Cid, local: bool = true
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Retrieve by Cid a single block or an entire dataset described by manifest
##
if local and not await (cid in self.networkStore):
return failure((ref BlockNotFoundError)(msg: "Block not found in local store"))
without manifest =? (await self.fetchManifest(cid)), err:
if err of AsyncTimeoutError:
return failure(err)
return await self.streamSingleBlock(cid)
await self.streamEntireDataset(manifest, cid)
proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} =
if err =? (await self.networkStore.delBlock(cid)).errorOption:
error "Error deleting block", cid, err = err.msg
return failure(err)
trace "Deleted block", cid
return success()
proc deleteEntireDataset(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} =
# Deletion is a strictly local operation
var store = self.networkStore.localStore
if not (await cid in store):
# As per the contract for delete*, an absent dataset is not an error.
return success()
without manifestBlock =? await store.getBlock(cid), err:
return failure(err)
without manifest =? Manifest.decode(manifestBlock), err:
return failure(err)
let runtimeQuota = initDuration(milliseconds = 100)
var lastIdle = getTime()
for i in 0 ..< manifest.blocksCount:
if (getTime() - lastIdle) >= runtimeQuota:
await idleAsync()
lastIdle = getTime()
if err =? (await store.delBlock(manifest.treeCid, i)).errorOption:
# The contract for delBlock is fuzzy, but we assume that if the block is
# simply missing we won't get an error. This is a best effort operation and
# can simply be retried.
error "Failed to delete block within dataset", index = i, err = err.msg
return failure(err)
if err =? (await store.delBlock(cid)).errorOption:
error "Error deleting manifest block", err = err.msg
success()
proc delete*(
self: CodexNodeRef, cid: Cid
): Future[?!void] {.async: (raises: [CatchableError]).} =
## Deletes a whole dataset, if Cid is a Manifest Cid, or a single block, if Cid a block Cid,
## from the underlying block store. This is a strictly local operation.
##
## Missing blocks in dataset deletes are ignored.
##
without isManifest =? cid.isManifest, err:
trace "Bad content type for CID:", cid = cid, err = err.msg
return failure(err)
if not isManifest:
return await self.deleteSingleBlock(cid)
await self.deleteEntireDataset(cid)
proc store*(
self: CodexNodeRef,
stream: LPStream,
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize,
): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest
##
info "Storing data"
let
hcodec = Sha256HashCodec
dataCodec = BlockCodec
chunker = LPStreamChunker.new(stream, chunkSize = blockSize)
var cids: seq[Cid]
try:
while (let chunk = await chunker.getBytes(); chunk.len > 0):
without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
return failure(err)
without cid =? Cid.init(CIDv1, dataCodec, mhash).mapFailure, err:
return failure(err)
without blk =? bt.Block.new(cid, chunk, verify = false):
return failure("Unable to init block from chunk!")
cids.add(cid)
if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)
finally:
await stream.close()
without tree =? CodexTree.init(cids), err:
return failure(err)
without treeCid =? tree.rootCid(CIDv1, dataCodec), err:
return failure(err)
for index, cid in cids:
without proof =? tree.getProof(index), err:
return failure(err)
if err =?
(await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
# TODO add log here
return failure(err)
let manifest = Manifest.new(
treeCid = treeCid,
blockSize = blockSize,
datasetSize = NBytes(chunker.offset),
version = CIDv1,
hcodec = hcodec,
codec = dataCodec,
filename = filename,
mimetype = mimetype,
)
without manifestBlk =? await self.storeManifest(manifest), err:
error "Unable to store manifest"
return failure(err)
info "Stored data",
manifestCid = manifestBlk.cid,
treeCid = treeCid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize,
filename = manifest.filename,
mimetype = manifest.mimetype
return manifestBlk.cid.success
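# Usage sketch (stream construction assumed): a successful store returns the
# manifest Cid, which is the handle later passed to `delete` or
# `requestStorage`.
#
#   without manifestCid =? await node.store(stream, filename = some "data.bin"), err:
#     error "Store failed", err = err.msg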
proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
without cidsIter =? await self.networkStore.listBlocks(BlockType.Manifest):
warn "Failed to listBlocks"
return
for c in cidsIter:
if cid =? await c:
without blk =? await self.networkStore.getBlock(cid):
warn "Failed to get manifest block by cid", cid
return
without manifest =? Manifest.decode(blk):
warn "Failed to decode manifest", cid
return
onManifest(cid, manifest)
proc setupRequest(
self: CodexNodeRef,
cid: Cid,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!StorageRequest] {.async.} =
## Setup slots for a given dataset
##
let
ecK = nodes - tolerance
ecM = tolerance
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateralPerByte = collateralPerByte
expiry = expiry
ecK = ecK
ecM = ecM
trace "Setting up slots"
without manifest =? await self.fetchManifest(cid), error:
trace "Unable to fetch manifest for cid"
return failure error
# Erasure code the dataset according to provided parameters
let erasure = Erasure.new(
self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
return failure(error)
without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err:
trace "Unable to create slot builder"
return failure(err)
without verifiable =? (await builder.buildManifest()), err:
trace "Unable to build verifiable manifest"
return failure(err)
without manifestBlk =? await self.storeManifest(verifiable), err:
trace "Unable to store verifiable manifest"
return failure(err)
let
verifyRoot =
if builder.verifyRoot.isNone:
return failure("No slots root")
else:
builder.verifyRoot.get.toBytes
request = StorageRequest(
ask: StorageAsk(
slots: verifiable.numSlots.uint64,
slotSize: builder.slotBytes.uint64,
duration: duration,
proofProbability: proofProbability,
pricePerBytePerSecond: pricePerBytePerSecond,
collateralPerByte: collateralPerByte,
maxSlotLoss: tolerance,
),
content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot),
expiry: expiry,
)
trace "Request created", request = $request
success request
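# Worked example of the erasure-coding parameters (hypothetical values): with
# nodes = 5 and tolerance = 2, the dataset is encoded into ecK = 3 data slots
# plus ecM = 2 parity slots, so any 3 of the 5 slots suffice to reconstruct
# the data and the request tolerates maxSlotLoss = 2 lost slots.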
proc requestStorage*(
self: CodexNodeRef,
cid: Cid,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!PurchaseId] {.async.} =
## Initiate a storage request sequence; this might
## be a multistep procedure.
##
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateralPerByte = collateralPerByte
expiry = expiry
now = self.clock.now
trace "Received a request for storage!"
without contracts =? self.contracts.client:
trace "Purchasing not available"
return failure "Purchasing not available"
without request =? (
await self.setupRequest(
cid, duration, proofProbability, nodes, tolerance, pricePerBytePerSecond,
collateralPerByte, expiry,
)
), err:
trace "Unable to setup request"
return failure err
let purchase = await contracts.purchasing.purchase(request)
success purchase.id
proc onStore(
self: CodexNodeRef,
request: StorageRequest,
expiry: SecondsSince1970,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
): Future[?!void] {.async: (raises: [CancelledError]).} =
## store data in local storage
##
let cid = request.content.cid
logScope:
cid = $cid
slotIdx = slotIdx
trace "Received a request to store a slot"
# TODO: Use the isRepairing to manage the slot download.
# If isRepairing is true, the slot has to be repaired before
# being downloaded.
without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)
without builder =?
Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)
if slotIdx > manifest.slotRoots.high.uint64:
trace "Slot index not in manifest", slotIdx
return failure(newException(CodexError, "Slot index not in manifest"))
proc updateExpiry(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let res = await allFinishedFailed[?!void](ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg
return failure(err)
return success()
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return failure("Cannot cast slot index to int")
without blksIter =? indexer.getIndicies(slotIdx.int).catch, err:
trace "Unable to get indicies from strategy", err = err.msg
return failure(err)
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
trace "Unable to build slot", err = err.msg
return failure(err)
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
return failure(newException(CodexError, "Slot root mismatch"))
trace "Slot successfully retrieved and reconstructed"
return success()
proc onProve(
self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
## Generates a proof for a given slot and challenge
##
let
cidStr = $slot.request.content.cid
slotIdx = slot.slotIndex
logScope:
cid = cidStr
slot = slotIdx
challenge = challenge
trace "Received proof challenge"
if prover =? self.prover:
trace "Prover enabled"
without cid =? Cid.init(cidStr).mapFailure, err:
error "Unable to parse Cid", cid, err = err.msg
return failure(err)
without manifest =? await self.fetchManifest(cid), err:
error "Unable to fetch manifest for cid", err = err.msg
return failure(err)
when defined(verify_circuit):
without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge),
err:
error "Unable to generate proof", err = err.msg
return failure(err)
without checked =? await prover.verify(proof, inputs), err:
error "Unable to verify proof", err = err.msg
return failure(err)
if not checked:
error "Proof verification failed"
return failure("Proof verification failed")
trace "Proof verified successfully"
else:
without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err:
error "Unable to generate proof", err = err.msg
return failure(err)
let groth16Proof = proof.toGroth16Proof()
trace "Proof generated successfully", groth16Proof
success groth16Proof
else:
warn "Prover not enabled"
failure "Prover not enabled"
proc onExpiryUpdate(
self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateExpiry(rootCid, expiry)
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
discard
proc start*(self: CodexNodeRef) {.async.} =
if not self.engine.isNil:
await self.engine.start()
if not self.discovery.isNil:
await self.discovery.start()
if not self.clock.isNil:
await self.clock.start()
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onStore(request, expiry, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
self.onClear(request, slotIndex)
hostContracts.sales.onProve = proc(
slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
# TODO: generate proof
self.onProve(slot, challenge)
try:
await hostContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start host contract interactions", error = error.msg
self.contracts.host = HostInteractions.none
if clientContracts =? self.contracts.client:
try:
await clientContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start client contract interactions: ", error = error.msg
self.contracts.client = ClientInteractions.none
if validatorContracts =? self.contracts.validator:
try:
await validatorContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start validator contract interactions: ", error = error.msg
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node"
if not self.taskpool.isNil:
self.taskpool.shutdown()
await self.trackedFutures.cancelTracked()
if not self.engine.isNil:
await self.engine.stop()
if not self.discovery.isNil:
await self.discovery.stop()
if clientContracts =? self.contracts.client:
await clientContracts.stop()
if hostContracts =? self.contracts.host:
await hostContracts.stop()
if validatorContracts =? self.contracts.validator:
await validatorContracts.stop()
if not self.clock.isNil:
await self.clock.stop()
if not self.networkStore.isNil:
await self.networkStore.close
proc new*(
T: type CodexNodeRef,
switch: Switch,
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
taskpool: Taskpool,
prover = Prover.none,
contracts = Contracts.default,
): CodexNodeRef =
## Create a new instance of a Codex node; call `start` to run it
##
CodexNodeRef(
switch: switch,
networkStore: networkStore,
engine: engine,
prover: prover,
discovery: discovery,
taskPool: taskpool,
contracts: contracts,
trackedFutures: TrackedFutures(),
)


@@ -1,17 +0,0 @@
import pkg/stint
type
Periodicity* = object
seconds*: uint64
Period* = uint64
Timestamp* = uint64
func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
timestamp div periodicity.seconds
func periodStart*(periodicity: Periodicity, period: Period): Timestamp =
period * periodicity.seconds
func periodEnd*(periodicity: Periodicity, period: Period): Timestamp =
periodicity.periodStart(period + 1)
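# Worked example (hypothetical values): with a period length of 10 seconds,
# timestamp 25 falls into period 2, which spans [20, 30):
#
#   let p = Periodicity(seconds: 10)
#   assert p.periodOf(25) == 2
#   assert p.periodStart(2) == 20
#   assert p.periodEnd(2) == 30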


@@ -1,74 +0,0 @@
import std/tables
import pkg/stint
import pkg/chronos
import pkg/questionable
import pkg/nimcrypto
import ./market
import ./clock
import ./purchasing/purchase
export questionable
export chronos
export market
export purchase
type
Purchasing* = ref object
market*: Market
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256
PurchaseTimeout* = Timeout
const DefaultProofProbability = 100.u256
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)
proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
let requestIds = await market.myRequests()
for requestId in requestIds:
let purchase = Purchase.new(requestId, purchasing.market, purchasing.clock)
purchase.load()
purchasing.purchases[purchase.id] = purchase
proc start*(purchasing: Purchasing) {.async.} =
await purchasing.load()
proc stop*(purchasing: Purchasing) {.async.} =
discard
proc populate*(
purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
result.ask.proofProbability = purchasing.proofProbability
if result.nonce == Nonce.default:
var id = result.nonce.toArray
doAssert randomBytes(id) == 32
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()
proc purchase*(
purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
purchase.start()
purchasing.purchases[purchase.id] = purchase
return purchase
func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase =
if purchasing.purchases.hasKey(id):
some purchasing.purchases[id]
else:
none Purchase
func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
var pIds: seq[PurchaseId] = @[]
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds
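# Usage sketch (market, clock and request construction assumed): `purchase`
# populates the request with a nonce, signer and default proof probability,
# then starts the purchase state machine and tracks it by id.
#
#   let purchasing = Purchasing.new(market, clock)
#   let purchase = await purchasing.purchase(request)
#   echo "purchase submitted: ", purchase.id.toHex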


@@ -1,74 +0,0 @@
import ./statemachine
import ./states/pending
import ./states/unknown
import ./purchaseid
# Purchase is implemented as a state machine.
#
# It can either be a new (pending) purchase that still needs to be submitted
# on-chain, or it is a purchase that was previously submitted on-chain, and
# we're just restoring its (unknown) state after a node restart.
#
# |
# v
# ------------------------- unknown
# | / /
# v v /
# pending ----> submitted ----> started ---------> finished <----/
# \ \ /
# \ ------------> failed <----/
# \ /
# --> cancelled <-----------------------
export Purchase
export purchaseid
export statemachine
func new*(
_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
var purchase = Purchase.new()
{.cast(noSideEffect).}:
purchase.future = newFuture[void]()
purchase.requestId = requestId
purchase.market = market
purchase.clock = clock
return purchase
func new*(
_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
purchase.request = some request
return purchase
proc start*(purchase: Purchase) =
purchase.start(PurchasePending())
proc load*(purchase: Purchase) =
purchase.start(PurchaseUnknown())
proc wait*(purchase: Purchase) {.async.} =
await purchase.future
func id*(purchase: Purchase): PurchaseId =
PurchaseId(purchase.requestId)
func finished*(purchase: Purchase): bool =
purchase.future.finished
func error*(purchase: Purchase): ?(ref CatchableError) =
if purchase.future.failed:
some purchase.future.error
else:
none (ref CatchableError)
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state
purchase.query(description)
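# Lifecycle sketch: `start` drives a brand-new purchase along the pending path
# of the diagram above, while `load` re-enters via the unknown state after a
# node restart; `wait` completes (or raises) once a terminal state is reached.
#
#   purchase.start()      # new purchase: pending -> submitted -> ...
#   await purchase.wait() # raises if the purchase ultimately failed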


@@ -1,14 +0,0 @@
import std/hashes
import ../logutils
type PurchaseId* = distinct array[32, byte]
logutils.formatIt(LogFormat.textLines, PurchaseId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
it.to0xHexLog
proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string =
array[32, byte](x).toHex


@@ -1,19 +0,0 @@
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../errors
export market
export clock
export asyncstatemachine
type
Purchase* = ref object of Machine
future*: Future[void]
market*: Market
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest
PurchaseState* = ref object of State
PurchaseError* = object of CodexError


@@ -1,35 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./error
declareCounter(codex_purchases_cancelled, "codex purchases cancelled")
logScope:
topics = "marketplace purchases cancelled"
type PurchaseCancelled* = ref object of PurchaseState
method `$`*(state: PurchaseCancelled): string =
"cancelled"
method run*(
state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_cancelled.inc()
let purchase = Purchase(machine)
try:
warn "Request cancelled, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
let error = newException(Timeout, "Purchase cancelled due to timeout")
purchase.future.fail(error)
except CancelledError as e:
trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseCancelled.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@@ -1,26 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
declareCounter(codex_purchases_error, "codex purchases error")
logScope:
topics = "marketplace purchases errored"
type PurchaseErrored* = ref object of PurchaseState
error*: ref CatchableError
method `$`*(state: PurchaseErrored): string =
"errored"
method run*(
state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_error.inc()
let purchase = Purchase(machine)
error "Purchasing error",
error = state.error.msgDetail, requestId = purchase.requestId
purchase.future.fail(state.error)


@@ -1,30 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error
declareCounter(codex_purchases_failed, "codex purchases failed")
type PurchaseFailed* = ref object of PurchaseState
method `$`*(state: PurchaseFailed): string =
"failed"
method run*(
state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_failed.inc()
let purchase = Purchase(machine)
try:
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
except CancelledError as e:
trace "PurchaseFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFailed.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
let error = newException(PurchaseError, "Purchase failed")
return some State(PurchaseErrored(error: error))


@@ -1,33 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error
declareCounter(codex_purchases_finished, "codex purchases finished")
logScope:
topics = "marketplace purchases finished"
type PurchaseFinished* = ref object of PurchaseState
method `$`*(state: PurchaseFinished): string =
"finished"
method run*(
state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_finished.inc()
let purchase = Purchase(machine)
try:
info "Purchase finished, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
purchase.future.complete()
except CancelledError as e:
trace "PurchaseFinished.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFinished.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@@ -1,28 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./submitted
import ./error
declareCounter(codex_purchases_pending, "codex purchases pending")
type PurchasePending* = ref object of PurchaseState
method `$`*(state: PurchasePending): string =
"pending"
method run*(
state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_pending.inc()
let purchase = Purchase(machine)
try:
let request = !purchase.request
await purchase.market.requestStorage(request)
return some State(PurchaseSubmitted())
except CancelledError as e:
trace "PurchasePending.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchasePending.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@@ -1,54 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_started, "codex purchases started")
logScope:
topics = "marketplace purchases started"
type PurchaseStarted* = ref object of PurchaseState
method `$`*(state: PurchaseStarted): string =
"started"
method run*(
state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_started.inc()
let purchase = Purchase(machine)
let clock = purchase.clock
let market = purchase.market
info "All required slots filled, purchase started", requestId = purchase.requestId
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()
var ended: Future[void]
try:
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
# Ensure that we're past the request end by waiting an additional second
ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
let fut = await one(ended, failed)
await subscription.unsubscribe()
if fut.id == failed.id:
ended.cancelSoon()
return some State(PurchaseFailed())
else:
failed.cancelSoon()
return some State(PurchaseFinished())
except CancelledError as e:
ended.cancelSoon()
failed.cancelSoon()
trace "PurchaseStarted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseStarted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@@ -1,56 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./started
import ./cancelled
import ./error
logScope:
topics = "marketplace purchases submitted"
declareCounter(codex_purchases_submitted, "codex purchases submitted")
type PurchaseSubmitted* = ref object of PurchaseState
method `$`*(state: PurchaseSubmitted): string =
"submitted"
method run*(
state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_submitted.inc()
let purchase = Purchase(machine)
let request = !purchase.request
let market = purchase.market
let clock = purchase.clock
info "Request submitted, waiting for slots to be filled",
requestId = purchase.requestId
proc wait() {.async.} =
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.fire()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done.wait()
await subscription.unsubscribe()
proc withTimeout(future: Future[void]) {.async.} =
let expiry = (await market.requestExpiresAt(request.id)) + 1
trace "waiting for request fulfillment or expiry", expiry
await future.withTimeout(clock, expiry)
try:
await wait().withTimeout()
except Timeout:
return some State(PurchaseCancelled())
except CancelledError as e:
trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseSubmitted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
return some State(PurchaseStarted())


@@ -1,44 +0,0 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_unknown, "codex purchases unknown")
type PurchaseUnknown* = ref object of PurchaseState
method `$`*(state: PurchaseUnknown): string =
"unknown"
method run*(
state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
try:
codex_purchases_unknown.inc()
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
purchase.request = some request
case requestState
of RequestState.New:
return some State(PurchaseSubmitted())
of RequestState.Started:
return some State(PurchaseStarted())
of RequestState.Cancelled:
return some State(PurchaseCancelled())
of RequestState.Finished:
return some State(PurchaseFinished())
of RequestState.Failed:
return some State(PurchaseFailed())
except CancelledError as e:
trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseUnknown.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

File diff suppressed because it is too large


@@ -1,555 +0,0 @@
import std/sequtils
import std/sugar
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/datastore
import ./market
import ./clock
import ./stores
import ./contracts/requests
import ./contracts/marketplace
import ./logutils
import ./sales/salescontext
import ./sales/salesagent
import ./sales/statemachine
import ./sales/slotqueue
import ./sales/states/preparing
import ./sales/states/unknown
import ./utils/trackedfutures
import ./utils/exceptions
## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
## Node Sales Market
## | | |
## | -- add availability --> | |
## | | <-- storage request --- |
## | <----- store data ------ | |
## | -----------------------> | |
## | | |
## | <----- prove data ---- | |
## | -----------------------> | |
## | | ---- storage proof ---> |
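##
## A wiring sketch (callback implementations assumed): the node plugs its
## storage and proving logic into Sales through the setters defined below.
##
##   sales.onStore = myOnStore
##   sales.onProve = myOnProve
##   sales.onExpiryUpdate = myOnExpiryUpdate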
export stint
export reservations
export salesagent
export salescontext
logScope:
topics = "sales marketplace"
type Sales* = ref object
context*: SalesContext
agents*: seq[SalesAgent]
running: bool
subscriptions: seq[market.Subscription]
trackedFutures: TrackedFutures
proc `onStore=`*(sales: Sales, onStore: OnStore) =
sales.context.onStore = some onStore
proc `onClear=`*(sales: Sales, onClear: OnClear) =
sales.context.onClear = some onClear
proc `onSale=`*(sales: Sales, callback: OnSale) =
sales.context.onSale = some callback
proc `onProve=`*(sales: Sales, callback: OnProve) =
sales.context.onProve = some callback
proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
sales.context.onExpiryUpdate = some callback
proc onStore*(sales: Sales): ?OnStore =
sales.context.onStore
proc onClear*(sales: Sales): ?OnClear =
sales.context.onClear
proc onSale*(sales: Sales): ?OnSale =
sales.context.onSale
proc onProve*(sales: Sales): ?OnProve =
sales.context.onProve
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
sales.context.onExpiryUpdate
proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
Sales.new(market, clock, repo, 0)
proc new*(
_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore,
simulateProofFailures: int,
): Sales =
let reservations = Reservations.new(repo)
Sales(
context: SalesContext(
market: market,
clock: clock,
reservations: reservations,
slotQueue: SlotQueue.new(),
simulateProofFailures: simulateProofFailures,
),
trackedFutures: TrackedFutures.new(),
subscriptions: @[],
)
proc remove(sales: Sales, agent: SalesAgent) {.async: (raises: []).} =
await agent.stop()
if sales.running:
sales.agents.keepItIf(it != agent)
proc cleanUp(
sales: Sales, agent: SalesAgent, reprocessSlot: bool, returnedCollateral: ?UInt256
) {.async: (raises: []).} =
let data = agent.data
logScope:
topics = "sales cleanUp"
requestId = data.requestId
slotIndex = data.slotIndex
reservationId = data.reservation .? id |? ReservationId.default
availabilityId = data.reservation .? availabilityId |? AvailabilityId.default
trace "cleaning up sales agent"
# If a reservation for the SalesAgent was not created, then cleanUp was
# called before the sales process really started, so there are no bytes
# to be returned.
if request =? data.request and reservation =? data.reservation:
if returnErr =? (
await noCancel sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId, reservation.id, request.ask.slotSize
)
).errorOption:
error "failure returning bytes",
error = returnErr.msg, bytes = request.ask.slotSize
# delete reservation and return reservation bytes back to the availability
if reservation =? data.reservation and
deleteErr =? (
await noCancel sales.context.reservations.deleteReservation(
reservation.id, reservation.availabilityId, returnedCollateral
)
).errorOption:
error "failure deleting reservation", error = deleteErr.msg
# Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last.
if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
let queue = sales.context.slotQueue
item.seen = true
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(item).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
let fut = sales.remove(agent)
sales.trackedFutures.track(fut)
proc filled(sales: Sales, request: StorageRequest, slotIndex: uint64) =
if onSale =? sales.context.onSale:
onSale(request, slotIndex)
proc processSlot(
sales: Sales, item: SlotQueueItem
) {.async: (raises: [CancelledError]).} =
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
let agent = newSalesAgent(
sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
)
let completed = newAsyncEvent()
agent.onCleanUp = proc(
reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
trace "slot cleanup"
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
completed.fire()
agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
trace "slot filled"
sales.filled(request, slotIndex)
completed.fire()
agent.start(SalePreparing())
sales.agents.add agent
trace "waiting for slot processing to complete"
await completed.wait()
trace "slot processing completed"
proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
let reservations = sales.context.reservations
without reservs =? await reservations.all(Reservation):
return
let unused = reservs.filter(
r => (
let slotId = slotId(r.requestId, r.slotIndex)
not activeSlots.any(slot => slot.id == slotId)
)
)
if unused.len == 0:
return
info "Found unused reservations for deletion", unused = unused.len
for reservation in unused:
logScope:
reservationId = reservation.id
availabilityId = reservation.availabilityId
if err =? (
await reservations.deleteReservation(reservation.id, reservation.availabilityId)
).errorOption:
error "Failed to delete unused reservation", error = err.msg
else:
trace "Deleted unused reservation"
proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} =
let market = sales.context.market
let slotIds = await market.mySlots()
var slots: seq[Slot] = @[]
info "Loading active slots", slotsCount = len(slots)
for slotId in slotIds:
if slot =? (await market.getActiveSlot(slotId)):
slots.add slot
return slots
proc activeSale*(sales: Sales, slotId: SlotId): Future[?SalesAgent] {.async.} =
for agent in sales.agents:
if slotId(agent.data.requestId, agent.data.slotIndex) == slotId:
return some agent
return none SalesAgent
proc load*(sales: Sales) {.async.} =
let activeSlots = await sales.mySlots()
await sales.deleteInactiveReservations(activeSlots)
for slot in activeSlots:
let agent =
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
agent.onCleanUp = proc(
reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
# There is no need to assign agent.onFilled as slots loaded from `mySlots`
# are inherently already filled and so assigning agent.onFilled would be
# superfluous.
agent.start(SaleUnknown())
sales.agents.add agent
proc OnAvailabilitySaved(
sales: Sales, availability: Availability
) {.async: (raises: []).} =
## When availabilities are modified or added, the queue should be unpaused if
## it was paused and any slots in the queue should have their `seen` flag
## cleared.
let queue = sales.context.slotQueue
queue.clearSeenFlags()
if queue.paused:
trace "unpausing queue after new availability added"
queue.unpause()
proc onStorageRequested(
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
logScope:
topics = "marketplace sales onStorageRequested"
requestId
slots = ask.slots
expiry
let slotQueue = sales.context.slotQueue
trace "storage requested, adding slots to queue"
let market = sales.context.market
without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
err:
error "Request failure, unable to calculate collateral", error = err.msg
return
without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
if err of SlotsOutOfRangeError:
warn "Too many slots, cannot add to queue"
else:
warn "Failed to create slot queue items from request", error = err.msg
return
for item in items:
# continue on failure
if err =? slotQueue.push(item).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue becaue it already exists"
elif err of QueueNotRunningError:
warn "Failed to push item to queue becaue queue is not running"
else:
warn "Error adding request to SlotQueue", error = err.msg
proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
logScope:
topics = "marketplace sales onSlotFreed"
requestId
slotIndex
trace "slot freed, adding to queue"
proc addSlotToQueue() {.async: (raises: []).} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
try:
without request =? (await market.getRequest(requestId)), err:
error "unknown request in contract", error = err.msgDetail
return
# Take the repairing state into consideration to calculate the collateral.
# This is particularly needed because it will affect the priority in the queue
# and we want to give the user the ability to tweak the parameters.
# Adding the repairing state directly in the queue priority calculation
# would not allow this flexibility.
without collateral =?
market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "Failed to add freed slot to queue: unable to calculate collateral",
error = err.msg
return
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
without slotQueueItem =?
SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return
if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue because it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue because queue is not running",
error = err.msgDetail
except CancelledError:
trace "sales.addSlotToQueue was cancelled"
# We could get rid of this by adding the storage ask in the SlotFreed event,
# so we would not need to call getRequest to get the collateralPerSlot.
let fut = addSlotToQueue()
sales.trackedFutures.track(fut)
proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
proc onStorageRequested(
requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
sales.onStorageRequested(requestId, ask, expiry)
try:
let sub = await market.subscribeRequests(onStorageRequested)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to storage request events", msg = e.msg
proc subscribeCancellation(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onCancelled(requestId: RequestId) =
trace "request cancelled (via contract RequestCancelled event), removing all request slots from queue"
queue.delete(requestId)
try:
let sub = await market.subscribeRequestCancelled(onCancelled)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to cancellation events", msg = e.msg
proc subscribeFulfilled*(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onFulfilled(requestId: RequestId) =
trace "request fulfilled, removing all request slots from queue"
queue.delete(requestId)
for agent in sales.agents:
agent.onFulfilled(requestId)
try:
let sub = await market.subscribeFulfillment(onFulfilled)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to storage fulfilled events", msg = e.msg
proc subscribeFailure(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onFailed(requestId: RequestId) =
trace "request failed, removing all request slots from queue"
queue.delete(requestId)
for agent in sales.agents:
agent.onFailed(requestId)
try:
let sub = await market.subscribeRequestFailed(onFailed)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to storage failure events", msg = e.msg
proc subscribeSlotFilled(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
trace "slot filled, removing from slot queue", requestId, slotIndex
queue.delete(requestId, slotIndex.uint16)
for agent in sales.agents:
agent.onSlotFilled(requestId, slotIndex)
try:
let sub = await market.subscribeSlotFilled(onSlotFilled)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to slot filled events", msg = e.msg
proc subscribeSlotFreed(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
proc onSlotFreed(requestId: RequestId, slotIndex: uint64) =
sales.onSlotFreed(requestId, slotIndex)
try:
let sub = await market.subscribeSlotFreed(onSlotFreed)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to slot freed events", msg = e.msg
proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) =
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
trace "reservations for slot full, removing from slot queue", requestId, slotIndex
queue.delete(requestId, slotIndex.uint16)
try:
let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to slot filled events", msg = e.msg
proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations
slotQueue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
try:
await sales.processSlot(item)
except CancelledError:
discard
slotQueue.start()
proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
if availability.enabled:
await sales.OnAvailabilitySaved(availability)
reservations.OnAvailabilitySaved = OnAvailabilitySaved
proc subscribe(sales: Sales) {.async.} =
await sales.subscribeRequested()
await sales.subscribeFulfilled()
await sales.subscribeFailure()
await sales.subscribeSlotFilled()
await sales.subscribeSlotFreed()
await sales.subscribeCancellation()
await sales.subscribeSlotReservationsFull()
proc unsubscribe(sales: Sales) {.async.} =
for sub in sales.subscriptions:
try:
await sub.unsubscribe()
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to unsubscribe from subscription", error = e.msg
proc start*(sales: Sales) {.async.} =
await sales.load()
sales.startSlotQueue()
await sales.subscribe()
sales.running = true
proc stop*(sales: Sales) {.async.} =
trace "stopping sales"
sales.running = false
await sales.context.slotQueue.stop()
await sales.unsubscribe()
await sales.trackedFutures.cancelTracked()
for agent in sales.agents:
await agent.stop()
sales.agents = @[]


@@ -1,764 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
##
## +--------------------------------------+
## | RESERVATION |
## +---------------------------------------------------+ |--------------------------------------|
## | AVAILABILITY | | ReservationId | id | PK |
## |---------------------------------------------------| |--------------------------------------|
## | AvailabilityId | id | PK |<-||-------o<-| AvailabilityId | availabilityId | FK |
## |---------------------------------------------------| |--------------------------------------|
## | UInt256 | totalSize | | | UInt256 | size | |
## |---------------------------------------------------| |--------------------------------------|
## | UInt256 | freeSize | | | UInt256 | slotIndex | |
## |---------------------------------------------------| +--------------------------------------+
## | UInt256 | duration | |
## |---------------------------------------------------|
## | UInt256 | minPricePerBytePerSecond | |
## |---------------------------------------------------|
## | UInt256 | totalCollateral | |
## |---------------------------------------------------|
## | UInt256 | totalRemainingCollateral | |
## +---------------------------------------------------+
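##
## Typical flow (argument values assumed): an Availability is created first,
## reserving quota in the repo; Reservations are then carved out of its
## freeSize as slots are picked up.
##
##   without avail =? await reservations.createAvailability(
##     size, duration, minPricePerBytePerSecond, totalCollateral,
##     enabled = true, until = 0.SecondsSince1970
##   ), err:
##     error "Could not create availability", err = err.msg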
import pkg/upraises
push:
{.upraises: [].}
import std/sequtils
import std/sugar
import std/typetraits
import std/sequtils
import std/times
import pkg/chronos
import pkg/datastore
import pkg/nimcrypto
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/stew/byteutils
import ../codextypes
import ../logutils
import ../clock
import ../stores
import ../market
import ../contracts/requests
import ../utils/json
import ../units
export requests
export logutils
logScope:
topics = "marketplace sales reservations"
type
AvailabilityId* = distinct array[32, byte]
ReservationId* = distinct array[32, byte]
SomeStorableObject = Availability | Reservation
SomeStorableId = AvailabilityId | ReservationId
Availability* = ref object
id* {.serialize.}: AvailabilityId
totalSize* {.serialize.}: uint64
freeSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
minPricePerBytePerSecond* {.serialize.}: UInt256
totalCollateral {.serialize.}: UInt256
totalRemainingCollateral* {.serialize.}: UInt256
# If set to false, the availability will not accept new slots.
# Disabling it does not impact any existing slots that are already being hosted.
enabled* {.serialize.}: bool
# Specifies the latest timestamp after which the availability will no longer host any slots.
# If set to 0, there will be no restrictions.
until* {.serialize.}: SecondsSince1970
Reservation* = ref object
id* {.serialize.}: ReservationId
availabilityId* {.serialize.}: AvailabilityId
size* {.serialize.}: uint64
requestId* {.serialize.}: RequestId
slotIndex* {.serialize.}: uint64
validUntil* {.serialize.}: SecondsSince1970
Reservations* = ref object of RootObj
availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore
OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.
upraises: [], gcsafe, async: (raises: [CancelledError]), closure
.}
IterDispose* =
proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
upraises: [], gcsafe, async: (raises: [])
.}
StorableIter* = ref object
finished*: bool
next*: GetNext
dispose*: IterDispose
ReservationsError* = object of CodexError
ReserveFailedError* = object of ReservationsError
ReleaseFailedError* = object of ReservationsError
DeleteFailedError* = object of ReservationsError
GetFailedError* = object of ReservationsError
NotExistsError* = object of ReservationsError
SerializationError* = object of ReservationsError
UpdateFailedError* = object of ReservationsError
BytesOutOfBoundsError* = object of ReservationsError
UntilOutOfBoundsError* = object of ReservationsError
const
SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
ReservationsKey = (SalesKey / "reservations").tryGet
proc hash*(x: AvailabilityId): Hash {.borrow.}
proc all*(
self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
proc all*(
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
template withLock(lock, body) =
try:
await lock.acquire()
body
finally:
if lock.locked:
lock.release()
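# Usage note: because the body runs inside try/finally, `withLock` releases
# the lock even on early `return`, which the availability procs below rely on:
#
#   withLock(self.availabilityLock):
#     without availability =? await self.get(availabilityKey, Availability), error:
#       return failure(error)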
proc new*(T: type Reservations, repo: RepoStore): Reservations =
T(availabilityLock: newAsyncLock(), repo: repo)
proc init*(
_: type Availability,
totalSize: uint64,
freeSize: uint64,
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
enabled: bool,
until: SecondsSince1970,
): Availability =
var id: array[32, byte]
doAssert randomBytes(id) == 32
Availability(
id: AvailabilityId(id),
totalSize: totalSize,
freeSize: freeSize,
duration: duration,
minPricePerBytePerSecond: minPricePerBytePerSecond,
totalCollateral: totalCollateral,
totalRemainingCollateral: totalCollateral,
enabled: enabled,
until: until,
)
func totalCollateral*(self: Availability): UInt256 {.inline.} =
return self.totalCollateral
proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} =
self.totalCollateral = value
self.totalRemainingCollateral = value
proc init*(
_: type Reservation,
availabilityId: AvailabilityId,
size: uint64,
requestId: RequestId,
slotIndex: uint64,
validUntil: SecondsSince1970,
): Reservation =
var id: array[32, byte]
doAssert randomBytes(id) == 32
Reservation(
id: ReservationId(id),
availabilityId: availabilityId,
size: size,
requestId: requestId,
slotIndex: slotIndex,
validUntil: validUntil,
)
func toArray(id: SomeStorableId): array[32, byte] =
array[32, byte](id)
proc `==`*(x, y: AvailabilityId): bool {.borrow.}
proc `==`*(x, y: ReservationId): bool {.borrow.}
proc `==`*(x, y: Reservation): bool =
x.id == y.id
proc `==`*(x, y: Availability): bool =
x.id == y.id
proc `$`*(id: SomeStorableId): string =
id.toArray.toHex
proc toErr[E1: ref CatchableError, E2: ReservationsError](
e1: E1, _: type E2, msg: string = e1.msg
): ref E2 =
return newException(E2, msg, e1)
logutils.formatIt(LogFormat.textLines, SomeStorableId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog
proc `OnAvailabilitySaved=`*(
self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
self.OnAvailabilitySaved = some OnAvailabilitySaved
func key*(id: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId>
(ReservationsKey / $id)
func key*(reservationId: ReservationId, availabilityId: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId> / <reservationId>
(availabilityId.key / $reservationId)
func key*(availability: Availability): ?!Key =
return availability.id.key
func maxCollateralPerByte*(availability: Availability): UInt256 =
# If freeSize happens to be zero, by convention maxCollateralPerByte
# equals totalRemainingCollateral.
if availability.freeSize == 0.uint64:
return availability.totalRemainingCollateral
return availability.totalRemainingCollateral div availability.freeSize.stuint(256)
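# Worked example (hypothetical values): with totalRemainingCollateral = 1000
# and freeSize = 100 bytes, maxCollateralPerByte = 1000 div 100 = 10; with
# freeSize = 0 the convention above yields 1000 directly.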
func key*(reservation: Reservation): ?!Key =
return key(reservation.id, reservation.availabilityId)
func available*(self: Reservations): uint =
self.repo.available.uint
func hasAvailable*(self: Reservations, bytes: uint): bool =
self.repo.available(bytes.NBytes)
proc exists*(
self: Reservations, key: Key
): Future[bool] {.async: (raises: [CancelledError]).} =
let exists = await self.repo.metaDs.ds.contains(key)
return exists
iterator items(self: StorableIter): auto =
while not self.finished:
yield self.next()
proc getImpl(
self: Reservations, key: Key
): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
if not await self.exists(key):
let err =
newException(NotExistsError, "object with key " & $key & " does not exist")
return failure(err)
without serialized =? await self.repo.metaDs.ds.get(key), error:
return failure(error.toErr(GetFailedError))
return success serialized
proc get*(
self: Reservations, key: Key, T: type SomeStorableObject
): Future[?!T] {.async: (raises: [CancelledError]).} =
without serialized =? await self.getImpl(key), error:
return failure(error)
without obj =? T.fromJson(serialized), error:
return failure(error.toErr(SerializationError))
return success obj
proc updateImpl(
self: Reservations, obj: SomeStorableObject
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "updating " & $(obj.type), id = obj.id
without key =? obj.key, error:
return failure(error)
if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption:
return failure(err.toErr(UpdateFailedError))
return success()
proc updateAvailability(
self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
availabilityId = obj.id
if obj.until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)
without key =? obj.key, error:
return failure(error)
without oldAvailability =? await self.get(key, Availability), err:
if err of NotExistsError:
trace "Creating new Availability"
let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
await OnAvailabilitySaved(obj)
return res
else:
return failure(err)
if obj.until > 0:
without allReservations =? await self.all(Reservation, obj.id), error:
error.msg = "Error updating reservation: " & error.msg
return failure(error)
let requestEnds = allReservations.mapIt(it.validUntil)
if requestEnds.len > 0 and requestEnds.max > obj.until:
let error = newException(
UntilOutOfBoundsError,
"Until parameter must be greater or equal to the longest currently hosted slot",
)
return failure(error)
# Sizing of the availability changed; we need to adjust the repo reservation accordingly
if oldAvailability.totalSize != obj.totalSize:
trace "totalSize changed, updating repo reservation"
if oldAvailability.totalSize < obj.totalSize: # storage added
if reserveErr =? (
await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes)
).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
elif oldAvailability.totalSize > obj.totalSize: # storage removed
if reserveErr =? (
await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes)
).errorOption:
return failure(reserveErr.toErr(ReleaseFailedError))
let res = await self.updateImpl(obj)
if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
# availability updated
# inform subscribers that Availability has been modified (with increased
# size)
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
await OnAvailabilitySaved(obj)
return res
proc update*(
self: Reservations, obj: Reservation
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateImpl(obj)
proc update*(
self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
withLock(self.availabilityLock):
return await self.updateAvailability(obj)
except AsyncLockError as e:
error "Lock error when trying to update the availability", err = e.msg
return failure(e)
proc delete(
self: Reservations, key: Key
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "deleting object", key
if not await self.exists(key):
return success()
if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
return failure(err.toErr(DeleteFailedError))
return success()
proc deleteReservation*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
returnedCollateral: ?UInt256 = UInt256.none,
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
reservationId
availabilityId
trace "deleting reservation"
without key =? key(reservationId, availabilityId), error:
return failure(error)
try:
withLock(self.availabilityLock):
without reservation =? (await self.get(key, Reservation)), error:
if error of NotExistsError:
return success()
else:
return failure(error)
without availabilityKey =? availabilityId.key, error:
return failure(error)
without var availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
if reservation.size > 0.uint64:
trace "returning remaining reservation bytes to availability",
size = reservation.size
availability.freeSize += reservation.size
if collateral =? returnedCollateral:
availability.totalRemainingCollateral += collateral
if updateErr =? (await self.updateAvailability(availability)).errorOption:
return failure(updateErr)
if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
return failure(err.toErr(DeleteFailedError))
return success()
except AsyncLockError as e:
error "Lock error when trying to delete the availability", err = e.msg
return failure(e)
# TODO: add support for deleting availabilities
# To delete, must not have any active sales.
proc createAvailability*(
self: Reservations,
size: uint64,
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
enabled: bool,
until: SecondsSince1970,
): Future[?!Availability] {.async: (raises: [CancelledError]).} =
trace "creating availability",
size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
if until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)
let availability = Availability.init(
size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
)
let bytes = availability.freeSize
if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
if updateErr =? (await self.update(availability)).errorOption:
# rollback the reserve
trace "rolling back reserve"
if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
return failure(updateErr)
return success(availability)
method createReservation*(
self: Reservations,
availabilityId: AvailabilityId,
slotSize: uint64,
requestId: RequestId,
slotIndex: uint64,
collateralPerByte: UInt256,
validUntil: SecondsSince1970,
): Future[?!Reservation] {.async: (raises: [CancelledError]), base.} =
try:
withLock(self.availabilityLock):
without availabilityKey =? availabilityId.key, error:
return failure(error)
without availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
# Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availability modifications
if availability.freeSize < slotSize:
let error = newException(
BytesOutOfBoundsError,
"trying to reserve an amount of bytes that is greater than the free size of the Availability",
)
return failure(error)
trace "Creating reservation",
availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil
let reservation =
Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)
if createResErr =? (await self.update(reservation)).errorOption:
return failure(createResErr)
# reduce availability freeSize by the slot size, which is now accounted for in
# the newly created Reservation
availability.freeSize -= slotSize
# adjust the remaining totalRemainingCollateral
availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte
# update availability with reduced size
trace "Updating availability with reduced size", freeSize = availability.freeSize
if updateErr =? (await self.updateAvailability(availability)).errorOption:
trace "Updating availability failed, rolling back reservation creation"
without key =? reservation.key, keyError:
keyError.parent = updateErr
return failure(keyError)
# rollback the reservation creation
if rollbackErr =? (await self.delete(key)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
return failure(updateErr)
trace "Reservation succesfully created"
return success(reservation)
except AsyncLockError as e:
error "Lock error when trying to delete the availability", err = e.msg
return failure(e)
proc returnBytesToAvailability*(
self: Reservations,
availabilityId: AvailabilityId,
reservationId: ReservationId,
bytes: uint64,
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
reservationId
availabilityId
try:
withLock(self.availabilityLock):
without key =? key(reservationId, availabilityId), error:
return failure(error)
without var reservation =? (await self.get(key, Reservation)), error:
return failure(error)
# We are ignoring bytes that are still present in the Reservation because
# they will be returned to Availability through `deleteReservation`.
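# `bytes` is the size originally requested for the slot, while
# `reservation.size` is what the Reservation still holds, so the difference
# is what has already been released to the repo and can be handed back now.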
let bytesToBeReturned = bytes - reservation.size
if bytesToBeReturned == 0:
trace "No bytes are returned",
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
return success()
trace "Returning bytes",
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
# First let's see if we can re-reserve the bytes. If the Repo's quota
# is depleted, we fail fast as there is nothing to be done at the moment.
if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
without availabilityKey =? availabilityId.key, error:
return failure(error)
without var availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
availability.freeSize += bytesToBeReturned
# Update availability with returned size
if updateErr =? (await self.updateAvailability(availability)).errorOption:
trace "Rolling back returning bytes"
if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
return failure(updateErr)
return success()
except AsyncLockError as e:
error "Lock error when returning bytes to the availability", err = e.msg
return failure(e)
proc release*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
bytes: uint,
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
topics = "release"
bytes
reservationId
availabilityId
trace "releasing bytes and updating reservation"
without key =? key(reservationId, availabilityId), error:
return failure(error)
without var reservation =? (await self.get(key, Reservation)), error:
return failure(error)
if reservation.size < bytes:
let error = newException(
BytesOutOfBoundsError,
"trying to release an amount of bytes that is greater than the total size of the Reservation",
)
return failure(error)
if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
return failure(releaseErr.toErr(ReleaseFailedError))
reservation.size -= bytes
# persist partially used Reservation with updated size
if err =? (await self.update(reservation)).errorOption:
# rollback release if an update error encountered
trace "rolling back release"
if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
rollbackErr.parent = err
return failure(rollbackErr)
return failure(err)
return success()
proc storables(
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
var iter = StorableIter()
let query = Query.init(queryKey)
when T is Availability:
# should indicate key length of 4, but let the .key logic determine it
without defaultKey =? AvailabilityId.default.key, error:
return failure(error)
elif T is Reservation:
# should indicate key length of 5, but let the .key logic determine it
without defaultKey =? key(ReservationId.default, AvailabilityId.default), error:
return failure(error)
else:
raiseAssert "unknown type"
without results =? await self.repo.metaDs.ds.query(query), error:
return failure(error)
# /sales/reservations
proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
await idleAsync()
iter.finished = results.finished
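# Availability and Reservation keys share the /sales/reservations prefix but
# differ in namespace depth, so filter on the key length of the requested
# type to skip entries of the other type.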
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
key =? res.key and key.namespaces.len == defaultKey.namespaces.len:
return some res.data
return none seq[byte]
proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
return await results.dispose()
iter.next = next
iter.dispose = dispose
return success iter
proc allImpl(
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
var ret: seq[T] = @[]
without storables =? (await self.storables(T, queryKey)), error:
return failure(error)
for storable in storables.items:
try:
without bytes =? (await storable):
continue
without obj =? T.fromJson(bytes), error:
error "json deserialization error",
json = string.fromBytes(bytes), error = error.msg
continue
ret.add obj
except CancelledError as err:
raise err
except CatchableError as err:
error "Error when retrieving storable", error = err.msg
continue
return success(ret)
proc all*(
self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
return await self.allImpl(T)
proc all*(
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
without key =? key(availabilityId):
return failure("no key")
return await self.allImpl(T, key)
proc findAvailability*(
self: Reservations,
size, duration: uint64,
pricePerBytePerSecond, collateralPerByte: UInt256,
validUntil: SecondsSince1970,
): Future[?Availability] {.async: (raises: [CancelledError]).} =
without storables =? (await self.storables(Availability)), e:
error "failed to get all storables", error = e.msg
return none Availability
for item in storables.items:
if bytes =? (await item) and availability =? Availability.fromJson(bytes):
if availability.enabled and size <= availability.freeSize and
duration <= availability.duration and
collateralPerByte <= availability.maxCollateralPerByte and
pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
(availability.until == 0 or availability.until >= validUntil):
trace "availability matched",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
availDuration = availability.duration,
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until
# TODO: As soon as we're on ARC-ORC, we can use destructors
# to automatically dispose our iterators when they fall out of scope.
# For now:
if err =? (await storables.dispose()).errorOption:
error "failed to dispose storables iter", error = err.msg
return none Availability
return some availability
trace "availability did not match",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
availDuration = availability.duration,
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until

@@ -1,155 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/upraises
import ../contracts/requests
import ../errors
import ../logutils
import ../utils/exceptions
import ./statemachine
import ./salescontext
import ./salesdata
import ./reservations
import ./slotqueue
export reservations
logScope:
topics = "marketplace sales"
type
SalesAgent* = ref object of Machine
context*: SalesContext
data*: SalesData
subscribed: bool
# Slot-level callbacks.
onCleanUp*: OnCleanUp
onFilled*: ?OnFilled
OnCleanUp* = proc(reprocessSlot = false, returnedCollateral = UInt256.none) {.
async: (raises: [])
.}
OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
SalesAgentError = object of CodexError
AllSlotsFilledError* = object of SalesAgentError
func `==`*(a, b: SalesAgent): bool =
a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex
proc newSalesAgent*(
context: SalesContext,
requestId: RequestId,
slotIndex: uint64,
request: ?StorageRequest,
slotQueueItem = SlotQueueItem.none,
): SalesAgent =
var agent = SalesAgent.new()
agent.context = context
agent.data = SalesData(
requestId: requestId,
slotIndex: slotIndex,
request: request,
slotQueueItem: slotQueueItem,
)
return agent
proc retrieveRequest*(agent: SalesAgent) {.async.} =
let data = agent.data
let market = agent.context.market
if data.request.isNone:
data.request = await market.getRequest(data.requestId)
proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} =
let data = agent.data
let market = agent.context.market
return await market.requestState(data.requestId)
func state*(agent: SalesAgent): ?string =
proc description(state: State): string =
$state
agent.query(description)
proc subscribeCancellation(agent: SalesAgent) {.async.} =
let data = agent.data
let clock = agent.context.clock
proc onCancelled() {.async: (raises: []).} =
without request =? data.request:
return
try:
let market = agent.context.market
let expiry = await market.requestExpiresAt(data.requestId)
while true:
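# Wait until just past the expiry (or just past now, if the expiry has
# already lapsed) before checking the on-chain request state.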
let deadline = max(clock.now, expiry) + 1
trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline
await clock.waitUntil(deadline)
without state =? await agent.retrieveRequestState():
error "Unknown request", requestId = data.requestId
return
case state
of New:
discard
of RequestState.Cancelled:
agent.schedule(cancelledEvent(request))
break
of RequestState.Started, RequestState.Finished, RequestState.Failed:
break
debug "The request is not yet canceled, even though it should be. Waiting for some more time.",
currentState = state, now = clock.now
except CancelledError:
trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId
except CatchableError as e:
error "Error while waiting for expiry to lapse", error = e.msgDetail
data.cancelled = onCancelled()
method onFulfilled*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
let cancelled = agent.data.cancelled
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
cancelled.cancelSoon()
method onFailed*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
without request =? agent.data.request:
return
if agent.data.requestId == requestId:
agent.schedule(failedEvent(request))
method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, upraises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex))
proc subscribe*(agent: SalesAgent) {.async.} =
if agent.subscribed:
return
await agent.subscribeCancellation()
agent.subscribed = true
proc unsubscribe*(agent: SalesAgent) {.async: (raises: []).} =
if not agent.subscribed:
return
let data = agent.data
if not data.cancelled.isNil and not data.cancelled.finished:
await data.cancelled.cancelAndWait()
data.cancelled = nil
agent.subscribed = false
proc stop*(agent: SalesAgent) {.async: (raises: []).} =
await Machine(agent).stop()
await agent.unsubscribe()

@@ -1,44 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import pkg/libp2p/cid
import ../market
import ../clock
import ./slotqueue
import ./reservations
import ../blocktype as bt
type
SalesContext* = ref object
market*: Market
clock*: Clock
# Sales-level callbacks. Closure will be overwritten each time a slot is
# processed.
onStore*: ?OnStore
onClear*: ?OnClear
onSale*: ?OnSale
onProve*: ?OnProve
onExpiryUpdate*: ?OnExpiryUpdate
reservations*: Reservations
slotQueue*: SlotQueue
simulateProofFailures*: int
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
OnStore* = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
blocksCb: BlocksCb,
isRepairing: bool,
): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, async: (raises: [CancelledError])
.}
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

@@ -1,14 +0,0 @@
import pkg/chronos
import ../contracts/requests
import ../market
import ./reservations
import ./slotqueue
type SalesData* = ref object
requestId*: RequestId
ask*: StorageAsk
request*: ?StorageRequest
slotIndex*: uint64
cancelled*: Future[void]
reservation*: ?Reservation
slotQueueItem*: ?SlotQueueItem

@@ -1,409 +0,0 @@
import std/sequtils
import std/tables
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import ../errors
import ../logutils
import ../rng
import ../utils
import ../contracts/requests
import ../utils/asyncheapqueue
import ../utils/trackedfutures
logScope:
topics = "marketplace slotqueue"
type
OnProcessSlot* =
proc(item: SlotQueueItem): Future[void] {.gcsafe, async: (raises: []).}
# A non-ref object is copied when assigned, preventing accidental modification
# of values that could break the ordering (eg ``slotQueue[1].collateral = 1``
# would update ``collateral`` without restoring the heap invariant; with a
# non-ref object, the compiler ensures that statement fails to compile).
SlotQueueItem* = object
requestId: RequestId
slotIndex: uint16
slotSize: uint64
duration: uint64
pricePerBytePerSecond: UInt256
collateral: UInt256 # Collateral computed
expiry: ?uint64
seen: bool
# no need to subtract 1 to prevent overflow when adding 1 (done to always
# allow a push), because the AsyncHeapQueue size is of type `int`, which is
# larger than `uint16`
SlotQueueSize = range[1'u16 .. uint16.high]
SlotQueue* = ref object
maxWorkers: int
onProcessSlot: ?OnProcessSlot
queue: AsyncHeapQueue[SlotQueueItem]
running: bool
trackedFutures: TrackedFutures
unpaused: AsyncEvent
SlotQueueError = object of CodexError
SlotQueueItemExistsError* = object of SlotQueueError
SlotQueueItemNotExistsError* = object of SlotQueueError
SlotsOutOfRangeError* = object of SlotQueueError
QueueNotRunningError* = object of SlotQueueError
# Number of concurrent workers used for processing SlotQueueItems
const DefaultMaxWorkers = 3
# Cap slot queue size to prevent unbounded growth and make sifting more
# efficient. Max size is not equivalent to the number of slots a host can
# service, which is limited by host availabilities and new requests circulating
# the network. Additionally, each new request/slot in the network will be
# included in the queue if it is higher priority than any of the existing
# items. Older slots should be unfillable over time as other hosts fill the
# slots.
const DefaultMaxSize = 128'u16
proc profitability(item: SlotQueueItem): UInt256 =
StorageAsk(
duration: item.duration,
pricePerBytePerSecond: item.pricePerBytePerSecond,
slotSize: item.slotSize,
).pricePerSlot
proc `<`*(a, b: SlotQueueItem): bool =
# for A to have a higher priority than B (in a min queue), A must be less than
# B.
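# The weights are powers of two, so the criteria are strictly ordered:
# unseen beats seen (bit 4), then higher profitability (bit 3), then lower
# collateral (bit 2), then later expiry (bit 1). For example, an unseen but
# less profitable item (scoreA = 0b10000) still outranks a seen, more
# profitable one (scoreB = 0b01000).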
var scoreA: uint8 = 0
var scoreB: uint8 = 0
proc addIf(score: var uint8, condition: bool, addition: int) =
if condition:
score += 1'u8 shl addition
scoreA.addIf(a.seen < b.seen, 4)
scoreB.addIf(a.seen > b.seen, 4)
scoreA.addIf(a.profitability > b.profitability, 3)
scoreB.addIf(a.profitability < b.profitability, 3)
scoreA.addIf(a.collateral < b.collateral, 2)
scoreB.addIf(a.collateral > b.collateral, 2)
if expiryA =? a.expiry and expiryB =? b.expiry:
scoreA.addIf(expiryA > expiryB, 1)
scoreB.addIf(expiryA < expiryB, 1)
return scoreA > scoreB
proc `==`*(a, b: SlotQueueItem): bool =
a.requestId == b.requestId and a.slotIndex == b.slotIndex
proc new*(
_: type SlotQueue,
maxWorkers = DefaultMaxWorkers,
maxSize: SlotQueueSize = DefaultMaxSize,
): SlotQueue =
if maxWorkers <= 0:
raise newException(ValueError, "maxWorkers must be positive")
if maxWorkers.uint16 > maxSize:
raise newException(ValueError, "maxWorkers must be less than maxSize")
SlotQueue(
maxWorkers: maxWorkers,
# Add 1 to always allow for an extra item to be pushed onto the queue
# temporarily. After push (and sort), the bottom-most item will be deleted
queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
running: false,
trackedFutures: TrackedFutures.new(),
unpaused: newAsyncEvent(),
)
# `workers` is not instantiated in the constructor, to avoid side effects in
# the `newAsyncQueue` procedure
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: ?uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem(
requestId: requestId,
slotIndex: slotIndex,
slotSize: ask.slotSize,
duration: ask.duration,
pricePerBytePerSecond: ask.pricePerBytePerSecond,
collateral: collateral,
expiry: expiry,
seen: seen,
)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem.init(requestId, slotIndex, ask, some expiry, collateral, seen)
proc init*(
_: type SlotQueueItem,
request: StorageRequest,
slotIndex: uint16,
collateral: UInt256,
): SlotQueueItem =
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: ?uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
if not ask.slots.inRange:
raise newException(SlotsOutOfRangeError, "Too many slots")
var i = 0'u16
proc initSlotQueueItem(): SlotQueueItem =
let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
inc i
return item
var items = newSeqWith(ask.slots.int, initSlotQueueItem())
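# Randomize the processing order, presumably so hosts evaluating the same
# request do not all race for the same slot first.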
Rng.instance.shuffle(items)
return items
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
SlotQueueItem.init(requestId, ask, some expiry, collateral)
proc init*(
_: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, uint64.none, collateral)
proc inRange*(val: SomeUnsignedInt): bool =
# widen to uint64 before comparing so values above uint16.high do not wrap
val.uint64 in SlotQueueSize.low.uint64 .. SlotQueueSize.high.uint64
proc requestId*(self: SlotQueueItem): RequestId =
self.requestId
proc slotIndex*(self: SlotQueueItem): uint16 =
self.slotIndex
proc slotSize*(self: SlotQueueItem): uint64 =
self.slotSize
proc duration*(self: SlotQueueItem): uint64 =
self.duration
proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 =
self.pricePerBytePerSecond
proc collateral*(self: SlotQueueItem): UInt256 =
self.collateral
proc seen*(self: SlotQueueItem): bool =
self.seen
proc `seen=`*(self: var SlotQueueItem, seen: bool) =
self.seen = seen
proc running*(self: SlotQueue): bool =
self.running
proc len*(self: SlotQueue): int =
self.queue.len
proc size*(self: SlotQueue): int =
self.queue.size - 1
proc paused*(self: SlotQueue): bool =
not self.unpaused.isSet
proc `$`*(self: SlotQueue): string =
$self.queue
proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
self.onProcessSlot = some onProcessSlot
proc contains*(self: SlotQueue, item: SlotQueueItem): bool =
self.queue.contains(item)
proc pause*(self: SlotQueue) =
# set unpaused flag to false -- coroutines will block on unpaused.wait()
self.unpaused.clear()
proc unpause*(self: SlotQueue) =
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
self.unpaused.fire()
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
seen = item.seen
trace "pushing item to queue"
if not self.running:
let err = newException(QueueNotRunningError, "queue not running")
return failure(err)
if self.contains(item):
let err = newException(SlotQueueItemExistsError, "item already exists")
return failure(err)
if err =? self.queue.pushNoWait(item).mapFailure.errorOption:
return failure(err)
if self.queue.full():
# delete the last item
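# (the queue was created with capacity maxSize + 1, so a successful push may
# momentarily exceed the cap; evicting the bottom, lowest-priority item
# restores it)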
self.queue.del(self.queue.size - 1)
doAssert self.queue.len <= self.queue.size - 1
# when slots are pushed to the queue, the queue should be unpaused if it was
# paused
if self.paused and not item.seen:
trace "unpausing queue after new slot pushed"
self.unpause()
return success()
proc push*(self: SlotQueue, items: seq[SlotQueueItem]): ?!void =
for item in items:
if err =? self.push(item).errorOption:
return failure(err)
return success()
proc findByRequest(self: SlotQueue, requestId: RequestId): seq[SlotQueueItem] =
var items: seq[SlotQueueItem] = @[]
for item in self.queue.items:
if item.requestId == requestId:
items.add item
return items
proc delete*(self: SlotQueue, item: SlotQueueItem) =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
trace "removing item from queue"
if not self.running:
trace "cannot delete item from queue, queue not running"
return
self.queue.delete(item)
proc delete*(self: SlotQueue, requestId: RequestId, slotIndex: uint16) =
let item = SlotQueueItem(requestId: requestId, slotIndex: slotIndex)
self.delete(item)
proc delete*(self: SlotQueue, requestId: RequestId) =
let items = self.findByRequest(requestId)
for item in items:
self.delete(item)
proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem =
self.queue[i]
proc clearSeenFlags*(self: SlotQueue) =
# Enumerate all items in the queue, overwriting each item with `seen = false`.
# To avoid issues with new queue items being pushed to the queue while all
# items are being iterated (eg if a new storage request comes in and pushes
# new slots to the queue), this routine must remain synchronous.
if self.queue.empty:
return
for item in self.queue.mitems:
item.seen = false # does not maintain the heap invariant
# force heap reshuffling to maintain the heap invariant
doAssert self.queue.update(self.queue[0]), "slot queue failed to reshuffle"
trace "all 'seen' flags cleared"
proc runWorker(self: SlotQueue) {.async: (raises: []).} =
trace "slot queue worker loop started"
while self.running:
try:
if self.paused:
trace "Queue is paused, waiting for new slots or availabilities to be modified/added"
# block until `unpaused` is fired, i.e. wait for the queue to be unpaused
await self.unpaused.wait()
let item = await self.queue.pop() # if queue empty, wait here for new items
logScope:
reqId = item.requestId
slotIdx = item.slotIndex
seen = item.seen
if not self.running: # may have changed after waiting for pop
trace "not running, exiting"
break
# If, upon processing a slot, the slot item already has a `seen` flag set,
# the queue should be paused.
if item.seen:
trace "processing already seen item, pausing queue",
reqId = item.requestId, slotIdx = item.slotIndex
self.pause()
# put item back in queue so that if other items are pushed while paused,
# it will be sorted accordingly. Otherwise, this item would be processed
# immediately (with priority over other items) once unpaused
trace "readding seen item back into the queue"
discard self.push(item) # on error, drop the item and continue
continue
trace "processing item"
without onProcessSlot =? self.onProcessSlot:
raiseAssert "slot queue onProcessSlot not set"
await onProcessSlot(item)
except CancelledError:
trace "slot queue worker cancelled"
break
except CatchableError as e: # raised from self.queue.pop()
warn "slot queue worker error encountered during processing", error = e.msg
trace "slot queue worker loop stopped"
proc start*(self: SlotQueue) =
if self.running:
return
trace "starting slot queue"
self.running = true
# Start the initial pool of workers; each worker loops, popping items from
# the queue and processing them until the queue is stopped
for i in 0 ..< self.maxWorkers:
let worker = self.runWorker()
self.trackedFutures.track(worker)
proc stop*(self: SlotQueue) {.async.} =
if not self.running:
return
trace "stopping slot queue"
self.running = false
await self.trackedFutures.cancelTracked()

@@ -1,42 +0,0 @@
import pkg/questionable
import pkg/upraises
import ../errors
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../contracts/requests
export market
export clock
export asyncstatemachine
type
SaleState* = ref object of State
SaleError* = object of CodexError
method onCancelled*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
discard
method onFailed*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
discard
method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, upraises: [].} =
discard
proc cancelledEvent*(request: StorageRequest): Event =
return proc(state: State): ?State =
SaleState(state).onCancelled(request)
proc failedEvent*(request: StorageRequest): Event =
return proc(state: State): ?State =
SaleState(state).onFailed(request)
proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event =
return proc(state: State): ?State =
SaleState(state).onSlotFilled(requestId, slotIndex)

@@ -1,62 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errored
logScope:
topics = "marketplace sales cancelled"
type SaleCancelled* = ref object of SaleState
method `$`*(state: SaleCancelled): string =
"SaleCancelled"
proc slotIsFilledByMe(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
let host = await market.getHost(requestId, slotIndex)
let me = await market.getSigner()
return host == me.some
method run*(
state: SaleCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market
without request =? data.request:
raiseAssert "no sale request"
try:
var returnedCollateral = UInt256.none
if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex
let slot = Slot(request: request, slotIndex: data.slotIndex)
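# Read the collateral before freeing the slot; presumably the on-chain
# record is cleared once the slot is freed, so it would be too late after.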
let currentCollateral = await market.currentCollateral(slot.id)
try:
await market.freeSlot(slot.id)
except SlotStateMismatchError as e:
warn "Failed to free slot because slot is already free", error = e.msg
returnedCollateral = currentCollateral.some
if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)
warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex
except CancelledError as e:
trace "SaleCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleCancelled.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@@ -1,96 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./filled
import ./initialproving
import ./errored
type SaleDownloading* = ref object of SaleState
logScope:
topics = "marketplace sales downloading"
method `$`*(state: SaleDownloading): string =
"SaleDownloading"
method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleFailed())
method onSlotFilled*(
state: SaleDownloading, requestId: RequestId, slotIndex: uint64
): ?State =
return some State(SaleFilled())
method run*(
state: SaleDownloading, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
let reservations = context.reservations
without onStore =? context.onStore:
raiseAssert "onStore callback not set"
without request =? data.request:
raiseAssert "no sale request"
without reservation =? data.reservation:
raiseAssert("no reservation")
logScope:
requestId = request.id
slotIndex = data.slotIndex
reservationId = reservation.id
availabilityId = reservation.availabilityId
proc onBlocks(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
# release batches of blocks as they are written to disk and
# update availability size
var bytes: uint = 0
for blk in blocks:
if not blk.cid.isEmpty:
bytes += blk.data.len.uint
trace "Releasing batch of bytes written to disk", bytes
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
try:
let requestId = request.id
let slotId = slotId(requestId, data.slotIndex)
let requestState = await market.requestState(requestId)
let isRepairing = (await market.slotState(slotId)) == SlotState.Repair
trace "Retrieving expiry"
var expiry: SecondsSince1970
if state =? requestState and state == RequestState.Started:
expiry = await market.getRequestEnd(requestId)
else:
expiry = await market.requestExpiresAt(requestId)
trace "Starting download"
if err =?
(await onStore(request, expiry, data.slotIndex, onBlocks, isRepairing)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))
trace "Download complete"
return some State(SaleInitialProving())
except CancelledError as e:
trace "SaleDownloading.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleDownloading.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@@ -1,41 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../statemachine
import ../salesagent
import ../../logutils
import ../../utils/exceptions
logScope:
topics = "marketplace sales errored"
type SaleErrored* = ref object of SaleState
error*: ref CatchableError
reprocessSlot*: bool
method `$`*(state: SaleErrored): string =
"SaleErrored"
method run*(
state: SaleErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
error "Sale error",
error = state.error.msgDetail,
requestId = data.requestId,
slotIndex = data.slotIndex
try:
if onClear =? context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = state.reprocessSlot)
except CancelledError as e:
trace "SaleErrored.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleErrored.run", error = e.msgDetail

@@ -1,40 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errored
logScope:
topics = "marketplace sales failed"
type
SaleFailed* = ref object of SaleState
SaleFailedError* = object of SaleError
method `$`*(state: SaleFailed): string =
"SaleFailed"
method run*(
state: SaleFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without request =? data.request:
raiseAssert "no sale request"
try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Removing slot from mySlots",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
let error = newException(SaleFailedError, "Sale failed")
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFailed.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@@ -1,77 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import ../../conf
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errored
import ./cancelled
import ./failed
import ./proving
when codex_enable_proof_failures:
import ./provingsimulated
logScope:
topics = "marketplace sales filled"
type
SaleFilled* = ref object of SaleState
HostMismatchError* = object of CatchableError
method onCancelled*(state: SaleFilled, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
return some State(SaleFailed())
method `$`*(state: SaleFilled): string =
"SaleFilled"
method run*(
state: SaleFilled, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
try:
let host = await market.getHost(data.requestId, data.slotIndex)
let me = await market.getSigner()
if host == me.some:
info "Slot succesfully filled",
requestId = data.requestId, slotIndex = data.slotIndex
without request =? data.request:
raiseAssert "no sale request"
if onFilled =? agent.onFilled:
onFilled(request, data.slotIndex)
without onExpiryUpdate =? context.onExpiryUpdate:
raiseAssert "onExpiryUpdate callback not set"
let requestEnd = await market.getRequestEnd(data.requestId)
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))
when codex_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(
SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
)
return some State(SaleProving())
else:
let error = newException(HostMismatchError, "Slot filled by other host")
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleFilled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFilled.run", error = e.msgDetail
return some State(SaleErrored(error: e))

Some files were not shown because too many files have changed in this diff.