Compare commits

..

37 Commits

Author SHA1 Message Date
Arnaud
60861d6af8
chore: rename codex to logos storage (#1359) 2025-12-18 17:23:09 +00:00
Eric
49e801803f
ci: remove dist tests and devnet deployment (#1338) 2025-12-17 06:03:59 +00:00
Jacek Sieka
858101c74c
chore: bump eth & networking (#1353) 2025-12-15 10:00:51 +00:00
Jacek Sieka
bd49591fff
chore: bump *-serialization (#1352) 2025-12-12 08:03:56 +00:00
Jacek Sieka
6765beee2c
chore: assorted bumps (#1351) 2025-12-11 21:03:36 +00:00
Jacek Sieka
45fec4b524
chore: bump libbacktrace (#1349) 2025-12-11 20:42:53 +00:00
Jacek Sieka
9ac9f6ff3c
chore: drop usage of upraises (#1348) 2025-12-11 09:03:43 +00:00
Arnaud
bd36032251
feat: add c binding (#1322)
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 07:34:09 +00:00
Chrysostomos Nanakos
be759baf4d
feat: Block exchange optimizations (#1325)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 05:47:02 +00:00
Eric
6147a751f1
fix(ci): Remove macos amd release build (#1337) 2025-11-13 05:37:43 +00:00
Eric
ee47ca8760
feat(libs): Use libp2p multiformats extensions instead of a rolling branch (#1329) 2025-11-13 04:48:33 +00:00
Eric
f791a960f2
fix(ci): Windows SIGILL in CI (#1339) 2025-11-03 11:45:02 +00:00
Arnaud
db8f866db4
feat: check if CID exists in local store (#1331) 2025-11-02 04:32:47 +00:00
Eric
7aca2f0e61
fix(ci): Move conventional commits job to workflow (#1340) 2025-11-02 04:00:55 +00:00
Eric
072bff5cab
fix: ci integration tests (#1335) 2025-10-30 19:38:11 +11:00
Arnaud
af55a761e6
chore: skip marketplace and long integration tests (#1326) 2025-10-22 19:22:33 +11:00
Adam Uhlíř
e3d8d195c3
chore: update nim-libp2p (#1323) 2025-10-01 13:19:15 +02:00
Slava
d1f2e2399b
ci: validate pr title to adhere conventional commits (#1254) 2025-08-12 08:51:41 +00:00
Slava
8cd10edb69
ci: auto deploy codex on devnet (#1302) 2025-07-28 10:02:19 +00:00
Slava
6cf99e255c
ci: release master builds and upload them to the cloud (#1298) 2025-07-10 11:17:11 +00:00
Dmitriy Ryajov
7eb2fb12cc
make default dirs runtime, not compile time. (#1292) 2025-06-26 18:44:24 +00:00
Slava
352273ff81
chore: bump codex-contracts-eth (#1293) 2025-06-26 18:09:48 +00:00
Slava
9ef9258720
chore(ci): bump node to v22 (#1285) 2025-06-26 01:11:00 +00:00
markspanbroek
7927afe715
chore: update nph dependency (#1279)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-25 10:30:48 +00:00
markspanbroek
01615354af
refactor(ci): run integration tests in parallel by spinning up more runners (#1287) 2025-06-25 08:56:16 +00:00
Chrysostomos Nanakos
baff902137
fix: resolve shared block request cancellation conflicts (#1284) 2025-06-24 15:05:25 +00:00
markspanbroek
4d44154a40
fix(ci): remove "update" to gcc-14 on windows (#1288) 2025-06-24 09:00:56 +00:00
markspanbroek
e1c397e112
fix(tests): auto import all tests files and fix forgotten tests (#1281) 2025-06-23 11:18:59 +00:00
Arnaud
7b660e3554
chore(marketplace): use hardhat ignition (#1195) 2025-06-20 15:55:00 +00:00
Arnaud
c5e424ff1b
feat(marketplace) - add status l2 (Linea) network (#1160) 2025-06-20 12:30:40 +00:00
Slava
36f64ad3e6
chore: update testnet marketplace address (#1283) 2025-06-20 06:13:58 +00:00
Ben Bierens
235c0ec842
chore: updates codex-contracts-eth submodule (#1278)
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2025-06-19 10:31:52 +00:00
Arnaud
d443df441d
chore: improve marketplace integration tests (#1268) 2025-06-19 06:36:10 +00:00
Arnaud
e35aec7870
chore: increase gas limits (#1272) 2025-06-18 12:18:56 +00:00
Slava
93e4e0f177
ci(docker): add stable tag for dist-tests images (#1273) 2025-06-16 16:22:09 +00:00
Slava
6db6bf5f72
feat(docker): adjust entrypoint (#1271)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-14 04:25:29 +00:00
Chrysostomos Nanakos
b305e00160
Add support for slot reconstruction on unavailable slot detection (#1235)
Co-authored-by: Arnaud <arnaud@status.im>
2025-06-12 22:19:42 +00:00
242 changed files with 7418 additions and 1663 deletions

View File

@ -81,12 +81,6 @@ runs:
mingw-w64-i686-ntldd-git mingw-w64-i686-ntldd-git
mingw-w64-i686-rust mingw-w64-i686-rust
- name: MSYS2 (Windows All) - Update to gcc 14
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst
- name: Install gcc 14 on Linux - name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04) # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }} if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
@ -224,7 +218,7 @@ runs:
run: | run: |
git config --global core.symlinks false git config --global core.symlinks false
- name: Build Nim and Codex dependencies - name: Build Nim and Logos Storage dependencies
shell: ${{ inputs.shell }} {0} shell: ${{ inputs.shell }} {0}
run: | run: |
which gcc which gcc

View File

@ -3,12 +3,14 @@ Tips for shorter build times
### Runner availability ### ### Runner availability ###
Currently, the biggest bottleneck when optimizing workflows is the availability When running on the Github free, pro or team plan, the bottleneck when
of Windows and macOS runners. Therefore, anything that reduces the time spent in optimizing workflows is the availability of macOS runners. Therefore, anything
Windows or macOS jobs will have a positive impact on the time waiting for that reduces the time spent in macOS jobs will have a positive impact on the
runners to become available. The usage limits for Github Actions are [described time waiting for runners to become available. On the Github enterprise plan,
here][limits]. You can see a breakdown of runner usage for your jobs in the this is not the case and you can more freely use parallelization on multiple
Github Actions tab ([example][usage]). runners. The usage limits for Github Actions are [described here][limits]. You
can see a breakdown of runner usage for your jobs in the Github Actions tab
([example][usage]).
### Windows is slow ### ### Windows is slow ###
@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.
Breaking up a long build job into several jobs that you run in parallel can have Breaking up a long build job into several jobs that you run in parallel can have
a positive impact on the wall clock time that a workflow runs. For instance, you a positive impact on the wall clock time that a workflow runs. For instance, you
might consider running unit tests and integration tests in parallel. Keep in might consider running unit tests and integration tests in parallel. When
mind however that availability of macOS and Windows runners is the biggest running on the Github free, pro or team plan, keep in mind that availability of
bottleneck. If you split a Windows job into two jobs, you now need to wait for macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
two Windows runners to become available! Therefore parallelization often only need to wait for two macOS runners to become available.
makes sense for Linux jobs.
### Refactoring ### ### Refactoring ###
@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
to know whether you introduced a failure on all platforms, or only on a single to know whether you introduced a failure on all platforms, or only on a single
one. You might be tempted to disable fail-fast, but keep in mind that this keeps one. You might be tempted to disable fail-fast, but keep in mind that this keeps
runners busy for longer on a workflow that you know is going to fail anyway. runners busy for longer on a workflow that you know is going to fail anyway.
Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed. Consequent runs will therefore take longer to start. Fail fast is most likely
better for overall development speed.
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage [usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action [composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows [reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache [cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache

View File

@ -24,9 +24,9 @@ jobs:
run: run:
shell: ${{ matrix.shell }} {0} shell: ${{ matrix.shell }} {0}
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }} name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
runs-on: ${{ matrix.builder }} runs-on: ${{ matrix.builder }}
timeout-minutes: 120 timeout-minutes: 90
steps: steps:
- name: Checkout sources - name: Checkout sources
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -49,18 +49,21 @@ jobs:
run: make -j${ncpu} test run: make -j${ncpu} test
- name: Setup Node.js - name: Setup Node.js
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
uses: actions/setup-node@v4 uses: actions/setup-node@v4
with: with:
node-version: 20 node-version: 22
- name: Start Ethereum node with Codex contracts - name: Start Ethereum node with Logos Storage contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all' if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/codex-contracts-eth working-directory: vendor/logos-storage-contracts-eth
env: env:
MSYS2_PATH_TYPE: inherit MSYS2_PATH_TYPE: inherit
run: | run: |
npm install npm ci
npm start & npm start &
# Wait for the contracts to be deployed
sleep 5
## Part 2 Tests ## ## Part 2 Tests ##
- name: Contract tests - name: Contract tests
@ -70,13 +73,15 @@ jobs:
## Part 3 Tests ## ## Part 3 Tests ##
- name: Integration tests - name: Integration tests
if: matrix.tests == 'integration' || matrix.tests == 'all' if: matrix.tests == 'integration' || matrix.tests == 'all'
env:
CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
run: make -j${ncpu} testIntegration run: make -j${ncpu} testIntegration
- name: Upload integration tests log files - name: Upload integration tests log files
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
if: (matrix.tests == 'integration' || matrix.tests == 'all') && always() if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
with: with:
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
path: tests/integration/logs/ path: tests/integration/logs/
retention-days: 1 retention-days: 1

View File

@ -16,29 +16,21 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
matrix: matrix:
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
matrix: ${{ steps.matrix.outputs.matrix }} matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }} cache_nonce: ${{ env.cache_nonce }}
steps: steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix - name: Compute matrix
id: matrix id: matrix
uses: fabiocaccamo/create-matrix-action@v5 run: |
with: echo 'matrix<<EOF' >> $GITHUB_OUTPUT
matrix: | tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} echo 'EOF' >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
build: build:
needs: matrix needs: matrix

View File

@ -0,0 +1,19 @@
name: Conventional Commits Linting
on:
push:
branches:
- master
pull_request:
workflow_dispatch:
merge_group:
jobs:
pr-title:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/pr-conventional-commits@1.4.1
with:
task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'

View File

@ -1,54 +0,0 @@
name: Docker - Dist-Tests
on:
push:
branches:
- master
tags:
- 'v*.*.*'
paths-ignore:
- '**/*.md'
- '.gitignore'
- '.github/**'
- '!.github/workflows/docker-dist-tests.yml'
- '!.github/workflows/docker-reusable.yml'
- 'docker/**'
- '!docker/codex.Dockerfile'
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
inputs:
run_release_tests:
description: Run Release tests
required: false
type: boolean
default: false
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
nat_ip_auto: true
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_suffix: dist-tests
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
run_release_tests: ${{ inputs.run_release_tests }}
secrets: inherit

View File

@ -68,6 +68,10 @@ on:
description: Specifies compatible smart contract image description: Specifies compatible smart contract image
required: false required: false
type: string type: string
outputs:
codex_image:
description: Logos Storage Docker image tag
value: ${{ jobs.publish.outputs.codex_image }}
env: env:
@ -83,7 +87,7 @@ env:
TAG_SUFFIX: ${{ inputs.tag_suffix }} TAG_SUFFIX: ${{ inputs.tag_suffix }}
CONTRACT_IMAGE: ${{ inputs.contract_image }} CONTRACT_IMAGE: ${{ inputs.contract_image }}
# Tests # Tests
TESTS_SOURCE: codex-storage/cs-codex-dist-tests TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
TESTS_BRANCH: master TESTS_BRANCH: master
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }} CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }} CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@ -91,15 +95,16 @@ env:
jobs: jobs:
# Compute variables
compute: compute:
name: Compute build ID name: Compute build ID
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
build_id: ${{ steps.build_id.outputs.build_id }} build_id: ${{ steps.build_id.outputs.build_id }}
steps: steps:
- name: Generate unique build id - name: Generate unique build id
id: build_id id: build_id
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
# Build platform specific image # Build platform specific image
build: build:
@ -134,7 +139,7 @@ jobs:
run: | run: |
# Create contract label for compatible contract image if specified # Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi fi
- name: Docker - Meta - name: Docker - Meta
@ -189,35 +194,35 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
version: ${{ steps.meta.outputs.version }} version: ${{ steps.meta.outputs.version }}
codex_image: ${{ steps.image_tag.outputs.codex_image }}
needs: [build, compute] needs: [build, compute]
steps: steps:
- name: Docker - Variables - name: Docker - Variables
run: | run: |
# Adjust custom suffix when set and # Adjust custom suffix when set
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi fi
# Disable SHA tags on tagged release # Disable SHA tags on tagged release
if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
echo "TAG_SHA=false" >>$GITHUB_ENV echo "TAG_SHA=false" >> $GITHUB_ENV
fi fi
# Handle latest and latest-custom using raw # Handle latest and latest-custom using raw
if [[ ${{ env.TAG_SHA }} == "false" ]]; then if [[ ${{ env.TAG_SHA }} == "false" ]]; then
echo "TAG_LATEST=false" >>$GITHUB_ENV echo "TAG_LATEST=false" >> $GITHUB_ENV
echo "TAG_RAW=true" >>$GITHUB_ENV echo "TAG_RAW=true" >> $GITHUB_ENV
if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
else else
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi fi
else else
echo "TAG_RAW=false" >>$GITHUB_ENV echo "TAG_RAW=false" >> $GITHUB_ENV
fi fi
# Create contract label for compatible contract image if specified # Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi fi
- name: Docker - Download digests - name: Docker - Download digests
@ -257,9 +262,12 @@ jobs:
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *) $(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
- name: Docker - Image tag
id: image_tag
run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
- name: Docker - Inspect image - name: Docker - Inspect image
run: | run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
# Compute Tests inputs # Compute Tests inputs
@ -308,7 +316,7 @@ jobs:
max-parallel: 1 max-parallel: 1
matrix: matrix:
tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }} tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
with: with:
source: ${{ needs.compute-tests-inputs.outputs.source }} source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }} branch: ${{ needs.compute-tests-inputs.outputs.branch }}
@ -325,7 +333,7 @@ jobs:
name: Run Release Tests name: Run Release Tests
needs: [compute-tests-inputs] needs: [compute-tests-inputs]
if: ${{ inputs.run_release_tests == 'true' }} if: ${{ inputs.run_release_tests == 'true' }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
with: with:
source: ${{ needs.compute-tests-inputs.outputs.source }} source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }} branch: ${{ needs.compute-tests-inputs.outputs.branch }}

View File

@ -31,7 +31,7 @@ jobs:
- name: Get submodule short hash - name: Get submodule short hash
id: get-hash id: get-hash
run: | run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth) hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push: build-and-push:
name: Build and Push name: Build and Push

View File

@ -52,7 +52,7 @@ jobs:
node-version: 18 node-version: 18
- name: Build OpenAPI - name: Build OpenAPI
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API" run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
- name: Build Postman Collection - name: Build Postman Collection
run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false

View File

@ -8,22 +8,21 @@ env:
cache_nonce: 0 # Allows for easily busting actions/cache caches cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned nim_version: pinned
jobs: jobs:
matrix: matrix:
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
matrix: ${{ steps.matrix.outputs.matrix }} matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }} cache_nonce: ${{ env.cache_nonce }}
steps: steps:
- name: Compute matrix - name: Checkout sources
id: matrix uses: actions/checkout@v4
uses: fabiocaccamo/create-matrix-action@v5 - name: Compute matrix
with: id: matrix
matrix: | run: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} echo 'matrix<<EOF' >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} echo 'EOF' >> $GITHUB_OUTPUT
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
build: build:
needs: matrix needs: matrix

View File

@ -4,13 +4,15 @@ on:
push: push:
tags: tags:
- 'v*.*.*' - 'v*.*.*'
branches:
- master
workflow_dispatch: workflow_dispatch:
env: env:
cache_nonce: 0 # Allows for easily busting actions/cache caches cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned nim_version: pinned
rust_version: 1.79.0 rust_version: 1.79.0
codex_binary_base: codex storage_binary_base: storage
cirdl_binary_base: cirdl cirdl_binary_base: cirdl
build_dir: build build_dir: build
nim_flags: '' nim_flags: ''
@ -30,7 +32,6 @@ jobs:
matrix: | matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2} os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
@ -72,18 +73,18 @@ jobs:
windows*) os_name="windows" ;; windows*) os_name="windows" ;;
esac esac
github_ref_name="${GITHUB_REF_NAME/\//-}" github_ref_name="${GITHUB_REF_NAME/\//-}"
codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}" storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}" cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
if [[ ${os_name} == "windows" ]]; then if [[ ${os_name} == "windows" ]]; then
codex_binary="${codex_binary}.exe" storage_binary="${storage_binary}.exe"
cirdl_binary="${cirdl_binary}.exe" cirdl_binary="${cirdl_binary}.exe"
fi fi
echo "codex_binary=${codex_binary}" >>$GITHUB_ENV echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
- name: Release - Build - name: Release - Build
run: | run: |
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}" make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}" make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
- name: Release - Libraries - name: Release - Libraries
@ -94,11 +95,11 @@ jobs:
done done
fi fi
- name: Release - Upload codex build artifacts - name: Release - Upload Logos Storage build artifacts
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: release-${{ env.codex_binary }} name: release-${{ env.storage_binary }}
path: ${{ env.build_dir }}/${{ env.codex_binary_base }}* path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
retention-days: 30 retention-days: 30
- name: Release - Upload cirdl build artifacts - name: Release - Upload cirdl build artifacts
@ -138,7 +139,7 @@ jobs:
} }
# Compress and prepare # Compress and prepare
for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
if [[ "${file}" == *".exe"* ]]; then if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only # Windows - binary only
@ -170,6 +171,34 @@ jobs:
path: /tmp/release/ path: /tmp/release/
retention-days: 30 retention-days: 30
- name: Release - Upload to the cloud
env:
s3_endpoint: ${{ secrets.S3_ENDPOINT }}
s3_bucket: ${{ secrets.S3_BUCKET }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
run: |
# Variables
branch="${GITHUB_REF_NAME/\//-}"
folder="/tmp/release"
# Tagged releases
if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
echo "${branch}" > "${folder}"/latest
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
rm -f "${folder}"/latest
# master branch
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
# Custom branch
else
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
fi
- name: Release - name: Release
uses: softprops/action-gh-release@v2 uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
@ -183,6 +212,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
with: with:
token: ${{ secrets.DISPATCH_PAT }} token: ${{ secrets.DISPATCH_PAT }}
repository: codex-storage/py-codex-api-client repository: logos-storage/logos-storage-py-api-client
event-type: generate event-type: generate
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}' client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'

49
.gitmodules vendored
View File

@ -37,22 +37,17 @@
path = vendor/nim-nitro path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git url = https://github.com/status-im/nim-nitro.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/questionable"] [submodule "vendor/questionable"]
path = vendor/questionable path = vendor/questionable
url = https://github.com/status-im/questionable.git url = https://github.com/status-im/questionable.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/upraises"]
path = vendor/upraises
url = https://github.com/markspanbroek/upraises.git
ignore = untracked
branch = master
[submodule "vendor/asynctest"] [submodule "vendor/asynctest"]
path = vendor/asynctest path = vendor/asynctest
url = https://github.com/status-im/asynctest.git url = https://github.com/status-im/asynctest.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/nim-presto"] [submodule "vendor/nim-presto"]
path = vendor/nim-presto path = vendor/nim-presto
url = https://github.com/status-im/nim-presto.git url = https://github.com/status-im/nim-presto.git
@ -132,7 +127,7 @@
path = vendor/nim-websock path = vendor/nim-websock
url = https://github.com/status-im/nim-websock.git url = https://github.com/status-im/nim-websock.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/nim-contract-abi"] [submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi url = https://github.com/status-im/nim-contract-abi
@ -160,13 +155,13 @@
path = vendor/nim-taskpools path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git url = https://github.com/status-im/nim-taskpools.git
ignore = untracked ignore = untracked
branch = master branch = stable
[submodule "vendor/nim-leopard"] [submodule "vendor/nim-leopard"]
path = vendor/nim-leopard path = vendor/nim-leopard
url = https://github.com/status-im/nim-leopard.git url = https://github.com/status-im/nim-leopard.git
[submodule "vendor/nim-codex-dht"] [submodule "vendor/logos-storage-nim-dht"]
path = vendor/nim-codex-dht path = vendor/logos-storage-nim-dht
url = https://github.com/codex-storage/nim-codex-dht.git url = https://github.com/logos-storage/logos-storage-nim-dht.git
ignore = untracked ignore = untracked
branch = master branch = master
[submodule "vendor/nim-datastore"] [submodule "vendor/nim-datastore"]
@ -178,9 +173,11 @@
[submodule "vendor/nim-eth"] [submodule "vendor/nim-eth"]
path = vendor/nim-eth path = vendor/nim-eth
url = https://github.com/status-im/nim-eth url = https://github.com/status-im/nim-eth
[submodule "vendor/codex-contracts-eth"] [submodule "vendor/logos-storage-contracts-eth"]
path = vendor/codex-contracts-eth path = vendor/logos-storage-contracts-eth
url = https://github.com/status-im/codex-contracts-eth url = https://github.com/logos-storage/logos-storage-contracts-eth.git
ignore = untracked
branch = master
[submodule "vendor/nim-protobuf-serialization"] [submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization url = https://github.com/status-im/nim-protobuf-serialization
@ -195,26 +192,28 @@
url = https://github.com/zevv/npeg url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"] [submodule "vendor/nim-poseidon2"]
path = vendor/nim-poseidon2 path = vendor/nim-poseidon2
url = https://github.com/codex-storage/nim-poseidon2.git url = https://github.com/logos-storage/nim-poseidon2.git
ignore = untracked
branch = master
[submodule "vendor/constantine"] [submodule "vendor/constantine"]
path = vendor/constantine path = vendor/constantine
url = https://github.com/mratsim/constantine.git url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"] [submodule "vendor/nim-circom-compat"]
path = vendor/nim-circom-compat path = vendor/nim-circom-compat
url = https://github.com/codex-storage/nim-circom-compat.git url = https://github.com/logos-storage/nim-circom-compat.git
ignore = untracked ignore = untracked
branch = master branch = master
[submodule "vendor/codex-storage-proofs-circuits"] [submodule "vendor/logos-storage-proofs-circuits"]
path = vendor/codex-storage-proofs-circuits path = vendor/logos-storage-proofs-circuits
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
ignore = untracked ignore = untracked
branch = master branch = master
[submodule "vendor/nim-serde"] [submodule "vendor/nim-serde"]
path = vendor/nim-serde path = vendor/nim-serde
url = https://github.com/codex-storage/nim-serde.git url = https://github.com/logos-storage/nim-serde.git
[submodule "vendor/nim-leveldbstatic"] [submodule "vendor/nim-leveldbstatic"]
path = vendor/nim-leveldbstatic path = vendor/nim-leveldbstatic
url = https://github.com/codex-storage/nim-leveldb.git url = https://github.com/logos-storage/nim-leveldb.git
[submodule "vendor/nim-zippy"] [submodule "vendor/nim-zippy"]
path = vendor/nim-zippy path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git url = https://github.com/status-im/nim-zippy.git
@ -225,9 +224,9 @@
path = vendor/nim-quic path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git url = https://github.com/vacp2p/nim-quic.git
ignore = untracked ignore = untracked
branch = master branch = main
[submodule "vendor/nim-ngtcp2"] [submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2 path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked ignore = untracked
branch = master branch = main

2
Jenkinsfile vendored
View File

@ -25,7 +25,7 @@ pipeline {
stage('Check') { stage('Check') {
steps { steps {
script { script {
sh './result/bin/codex --version' sh './result/bin/storage --version'
} }
} }
} }

View File

@ -93,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.' # default target, because it's the first one that doesn't start with '.'
# Builds the codex binary # Builds the Logos Storage binary
all: | build deps all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \ echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims $(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl # Build tools/cirdl
cirdl: | deps cirdl: | deps
@ -232,6 +232,7 @@ format:
$(NPH) *.nim $(NPH) *.nim
$(NPH) codex/ $(NPH) codex/
$(NPH) tests/ $(NPH) tests/
$(NPH) library/
clean-nph: clean-nph:
rm -f $(NPH) rm -f $(NPH)
@ -242,4 +243,32 @@ print-nph-path:
clean: | clean-nph clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
endif
endif # "variables.mk" was not included endif # "variables.mk" was not included

View File

@ -1,22 +1,22 @@
# Codex Decentralized Durability Engine # Logos Storage Decentralized Engine
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval. > The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha. > WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability) [![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster) [![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster) [![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex) [![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ) [![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex) ![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run ## Build and Run
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build). For detailed instructions on preparing to build logos-storagenim see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
To build the project, clone it and run: To build the project, clone it and run:
@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
Run the client with: Run the client with:
```bash ```bash
build/codex build/storage
``` ```
## Configuration ## Configuration
It is possible to configure a Codex node in several ways: It is possible to configure a Logos Storage node in several ways:
1. CLI options 1. CLI options
2. Environment variables 2. Environment variables
3. Configuration file 3. Configuration file
@ -45,21 +45,71 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
## Guides ## Guides
To get acquainted with Codex, consider: To get acquainted with Logos Storage, consider:
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and; * running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well. * if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API ## API
The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage). The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage).
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The bindings is located in the `library` folder.
Currently, only a Go binding is included.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
### Run the Go example
Build the Go example:
```bash
go build -o storage-go examples/golang/storage.go
```
Export the library path:
```bash
export LD_LIBRARY_PATH=build
```
Run the example:
```bash
./storage-go
```
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
### Limitation
Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed.
## Contributing and development ## Contributing and development
Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
### Linting and formatting ### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. `logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up fresh setup, in order to get `nph` run `make build-nph`. If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
In order to format files run `make nph/<file/folder you want to format>`. In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them. If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.

View File

@ -10,17 +10,17 @@ nim c -r run_benchmarks
``` ```
By default all circuit files for each combinations of circuit args will be generated in a unique folder named like: By default all circuit files for each combinations of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3 logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed. Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suite your needs. See `create_circuits.nim` for their definition. You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suite your needs. See `create_circuits.nim` for their definition.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this. The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI ## Logos Storage Ark Circom CLI
Runs Codex's prover setup with Ark / Circom. Runs Logos Storage's prover setup with Ark / Circom.
Compile: Compile:
```sh ```sh

View File

@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
func default*(tp: typedesc[CircuitEnv]): CircuitEnv = func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir() let codexDir = findCodexProjectDir()
result.nimCircuitCli = result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" / codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli" "proof_input" / "cli"
result.circuitDirIncludes = result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit" codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
result.ptauPath = result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau" codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
@ -118,7 +118,7 @@ proc createCircuit*(
## ##
## All needed circuit files will be generated as needed. ## All needed circuit files will be generated as needed.
## They will be located in `circBenchDir` which defaults to a folder like: ## They will be located in `circBenchDir` which defaults to a folder like:
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3` ## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## with all the given CircuitArgs. ## with all the given CircuitArgs.
## ##
let circdir = circBenchDir let circdir = circBenchDir

View File

@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
) )
benchRuns[benchmarkName] = (runs.avg(), count) benchRuns[benchmarkName] = (runs.avg(), count)
template printBenchMarkSummaries*(printRegular=true, printTsv=true) = template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
if printRegular: if printRegular:
echo "" echo ""
for k, v in benchRuns: for k, v in benchRuns:
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
if printTsv: if printTsv:
echo "" echo ""
echo "name", "\t", "avgTimeSec", "\t", "count" echo "name", "\t", "avgTimeSec", "\t", "count"
for k, v in benchRuns: for k, v in benchRuns:
echo k, "\t", v.avgTimeSec, "\t", v.count echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math import std/math
func floorLog2*(x: int): int = func floorLog2*(x: int): int =

View File

@ -3,7 +3,7 @@ mode = ScriptMode.Verbose
import std/os except commandLineParams import std/os except commandLineParams
### Helper functions ### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
if not dirExists "build": if not dirExists "build":
mkDir "build" mkDir "build"
@ -18,57 +18,82 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
let let
# Place build output in 'build' folder, even if name includes a longer path. # Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd = cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim" srcName & ".nim"
exec(cmd) exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") = proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
buildBinary name, srcDir, params if not dirExists "build":
exec "build/" & name mkDir "build"
task codex, "build codex binary": if `type` == "dynamic":
let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " &
"-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " &
params & " " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task storage, "build logos storage binary":
buildBinary "codex", buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE" params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary": task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl" buildBinary "tools/cirdl/cirdl"
task testCodex, "Build & run Codex tests": task testStorage, "Build & run Logos Storage tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true" test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
task testContracts, "Build & run Codex Contract tests": task testContracts, "Build & run Logos Storage Contract tests":
test "testContracts" test "testContracts"
task testIntegration, "Run integration tests": task testIntegration, "Run integration tests":
buildBinary "codex", buildBinary "codex",
outName = "storage",
params = params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true" "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
test "testIntegration" test "testIntegration"
# use params to enable logging from the integration test executable # use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " & # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE" # "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary": task build, "build Logos Storage binary":
codexTask() storageTask()
task test, "Run tests": task test, "Run tests":
testCodexTask() testStorageTask()
task testTools, "Run Tools tests": task testTools, "Run Tools tests":
toolsCirdlTask() toolsCirdlTask()
test "testTools" test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)": task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask() testStorageTask()
testContractsTask() testContractsTask()
testIntegrationTask() testIntegrationTask()
testToolsTask() testToolsTask()
task testTaiko, "Run Taiko L2 tests": task testTaiko, "Run Taiko L2 tests":
codexTask() storageTask()
test "testTaiko" test "testTaiko"
import strutils import strutils
@ -101,7 +126,7 @@ task coverage, "generates code coverage report":
test "coverage", test "coverage",
srcDir = "tests/", srcDir = "tests/",
params = params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true" " --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c") exec("rm nimcache/coverage/*.c")
rmDir("coverage") rmDir("coverage")
mkDir("coverage") mkDir("coverage")
@ -121,3 +146,23 @@ task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== " echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "": if findExe("open") != "":
exec("open coverage/report/index.html") exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -45,7 +45,7 @@ when isMainModule:
let config = CodexConf.load( let config = CodexConf.load(
version = codexFullVersion, version = codexFullVersion,
envVarsPrefix = "codex", envVarsPrefix = "storage",
secondarySources = proc( secondarySources = proc(
config: CodexConf, sources: auto config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} = ) {.gcsafe, raises: [ConfigurationError].} =
@ -54,6 +54,16 @@ when isMainModule:
, ,
) )
config.setupLogging() config.setupLogging()
try:
updateLogLevel(config.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
config.setupMetrics() config.setupMetrics()
if not (checkAndCreateDataDir((config.dataDir).string)): if not (checkAndCreateDataDir((config.dataDir).string)):
@ -89,15 +99,15 @@ when isMainModule:
try: try:
CodexServer.new(config, privateKey) CodexServer.new(config, privateKey)
except Exception as exc: except Exception as exc:
error "Failed to start Codex", msg = exc.msg error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure quit QuitFailure
## Ctrl+C handling ## Ctrl+C handling
proc doShutdown() = proc doShutdown() =
shutdown = server.stop() shutdown = server.shutdown()
state = CodexStatus.Stopping state = CodexStatus.Stopping
notice "Stopping Codex" notice "Stopping Logos Storage"
proc controlCHandler() {.noconv.} = proc controlCHandler() {.noconv.} =
when defined(windows): when defined(windows):
@ -128,7 +138,7 @@ when isMainModule:
try: try:
waitFor server.start() waitFor server.start()
except CatchableError as error: except CatchableError as error:
error "Codex failed to start", error = error.msg error "Logos Storage failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey, # XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all # but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they # services so they won't crash if we attempt to stop them before they
@ -149,7 +159,7 @@ when isMainModule:
# be assigned before state switches to Stopping # be assigned before state switches to Stopping
waitFor shutdown waitFor shutdown
except CatchableError as error: except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg error "Logos Storage didn't shutdown correctly", error = error.msg
quit QuitFailure quit QuitFailure
notice "Exited codex" notice "Exited Storage"

View File

@ -1,5 +1,5 @@
version = "0.1.0" version = "0.1.0"
author = "Codex Team" author = "Logos Storage Team"
description = "p2p data durability engine" description = "p2p data durability engine"
license = "MIT" license = "MIT"
binDir = "build" binDir = "build"

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -124,6 +124,10 @@ proc start*(b: Advertiser) {.async: (raises: []).} =
trace "Advertiser start" trace "Advertiser start"
# The advertiser is expected to be started only once.
if b.advertiserRunning:
raiseAssert "Advertiser can only be started once — this should not happen"
proc onBlock(cid: Cid) {.async: (raises: []).} = proc onBlock(cid: Cid) {.async: (raises: []).} =
try: try:
await b.advertiseBlock(cid) await b.advertiseBlock(cid)
@ -133,10 +137,6 @@ proc start*(b: Advertiser) {.async: (raises: []).} =
doAssert(b.localStore.onBlockStored.isNone()) doAssert(b.localStore.onBlockStored.isNone())
b.localStore.onBlockStored = onBlock.some b.localStore.onBlockStored = onBlock.some
if b.advertiserRunning:
warn "Starting advertiser twice"
return
b.advertiserRunning = true b.advertiserRunning = true
for i in 0 ..< b.concurrentAdvReqs: for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop() let fut = b.processQueueLoop()

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -8,6 +8,7 @@
## those terms. ## those terms.
import std/sequtils import std/sequtils
import std/algorithm
import pkg/chronos import pkg/chronos
import pkg/libp2p/cid import pkg/libp2p/cid
@ -38,6 +39,7 @@ const
DefaultConcurrentDiscRequests = 10 DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3 DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds DefaultDiscoveryLoopSleep = 3.seconds
type DiscoveryEngine* = ref object of RootObj type DiscoveryEngine* = ref object of RootObj
@ -51,11 +53,32 @@ type DiscoveryEngine* = ref object of RootObj
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests # Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} = proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try: try:
while b.discEngineRunning: while b.discEngineRunning:
@ -78,8 +101,16 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
trace "Discovery request already in progress", cid trace "Discovery request already in progress", cid
continue continue
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid) let haves = b.peers.peersHave(cid)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock: if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid) let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request b.inFlightDiscReqs[cid] = request
@ -156,6 +187,7 @@ proc new*(
concurrentDiscReqs = DefaultConcurrentDiscRequests, concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep, discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock, minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine = ): DiscoveryEngine =
## Create a discovery engine instance for advertising services ## Create a discovery engine instance for advertising services
## ##
@ -171,4 +203,5 @@ proc new*(
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](), inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep, discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock, minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
) )

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,12 +12,14 @@ import std/sets
import std/options import std/options
import std/algorithm import std/algorithm
import std/sugar import std/sugar
import std/random
import pkg/chronos import pkg/chronos
import pkg/libp2p/[cid, switch, multihash, multicodec] import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics import pkg/metrics
import pkg/stint import pkg/stint
import pkg/questionable import pkg/questionable
import pkg/stew/shims/sets
import ../../rng import ../../rng
import ../../stores/blockstore import ../../stores/blockstore
@ -63,30 +65,59 @@ declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sen
declareCounter( declareCounter(
codex_block_exchange_blocks_received, "codex blockexchange blocks received" codex_block_exchange_blocks_received, "codex blockexchange blocks received"
) )
declareCounter(
codex_block_exchange_spurious_blocks_received,
"codex blockexchange unrequested/duplicate blocks received",
)
declareCounter(
codex_block_exchange_discovery_requests_total,
"Total number of peer discovery requests sent",
)
declareCounter(
codex_block_exchange_peer_timeouts_total, "Total number of peer activity timeouts"
)
declareCounter(
codex_block_exchange_requests_failed_total,
"Total number of block requests that failed after exhausting retries",
)
const const
DefaultMaxPeersPerRequest* = 10 # The default max message length of nim-libp2p is 100 megabytes, meaning we can
# in principle fit up to 1600 64k blocks per message, so 20 is well under
# that number.
DefaultMaxBlocksPerMessage = 20
DefaultTaskQueueSize = 100 DefaultTaskQueueSize = 100
DefaultConcurrentTasks = 10 DefaultConcurrentTasks = 10
# Don't do more than one discovery request per `DiscoveryRateLimit` seconds.
DiscoveryRateLimit = 3.seconds
DefaultPeerActivityTimeout = 1.minutes
# Match MaxWantListBatchSize to efficiently respond to incoming WantLists
PresenceBatchSize = MaxWantListBatchSize
CleanupBatchSize = 2048
type type
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.} TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.} TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
PeerSelector* =
proc(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx {.gcsafe, raises: [].}
BlockExcEngine* = ref object of RootObj BlockExcEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance localStore*: BlockStore # Local block store for this instance
network*: BlockExcNetwork # Petwork interface network*: BlockExcNetwork # Network interface
peers*: PeerCtxStore # Peers we're currently actively exchanging with peers*: PeerCtxStore # Peers we're currently actively exchanging with
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx] taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
# Peers we're currently processing tasks for selectPeer*: PeerSelector # Peers we're currently processing tasks for
concurrentTasks: int # Number of concurrent peers we're serving at any given time concurrentTasks: int # Number of concurrent peers we're serving at any given time
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running blockexcRunning: bool # Indicates if the blockexc task is running
maxBlocksPerMessage: int
# Maximum number of blocks we can squeeze in a single message
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
wallet*: WalletRef # Nitro wallet for micropayments wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing pricing*: ?Pricing # Optional bandwidth pricing
discovery*: DiscoveryEngine discovery*: DiscoveryEngine
advertiser*: Advertiser advertiser*: Advertiser
lastDiscRequest: Moment # time of last discovery request
Pricing* = object Pricing* = object
address*: EthAddress address*: EthAddress
@ -104,7 +135,6 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(self: BlockExcEngine) {.async: (raises: []).} = proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task ## Start the blockexc task
## ##
await self.discovery.start() await self.discovery.start()
await self.advertiser.start() await self.advertiser.start()
@ -154,8 +184,145 @@ proc sendWantBlock(
) # we want this remote to send us a block ) # we want this remote to send us a block
codex_block_exchange_want_block_lists_sent.inc() codex_block_exchange_want_block_lists_sent.inc()
proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx = proc sendBatchedWantList(
Rng.instance.sample(peers) self: BlockExcEngine,
peer: BlockExcPeerCtx,
addresses: seq[BlockAddress],
full: bool,
) {.async: (raises: [CancelledError]).} =
var offset = 0
while offset < addresses.len:
let batchEnd = min(offset + MaxWantListBatchSize, addresses.len)
let batch = addresses[offset ..< batchEnd]
trace "Sending want list batch",
peer = peer.id,
batchSize = batch.len,
offset = offset,
total = addresses.len,
full = full
await self.network.request.sendWantList(
peer.id, batch, full = (full and offset == 0)
)
for address in batch:
peer.lastSentWants.incl(address)
offset = batchEnd
proc refreshBlockKnowledge(
self: BlockExcEngine, peer: BlockExcPeerCtx, skipDelta = false, resetBackoff = false
) {.async: (raises: [CancelledError]).} =
if peer.lastSentWants.len > 0:
var toRemove: seq[BlockAddress]
for address in peer.lastSentWants:
if address notin self.pendingBlocks:
toRemove.add(address)
if toRemove.len >= CleanupBatchSize:
await idleAsync()
break
for addr in toRemove:
peer.lastSentWants.excl(addr)
if self.pendingBlocks.wantListLen == 0:
if peer.lastSentWants.len > 0:
trace "Clearing want list tracking, no pending blocks", peer = peer.id
peer.lastSentWants.clear()
return
# We send only blocks that the peer hasn't already told us that they already have.
let
peerHave = peer.peerHave
toAsk = toHashSet(self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave))
if toAsk.len == 0:
if peer.lastSentWants.len > 0:
trace "Clearing want list tracking, peer has all blocks", peer = peer.id
peer.lastSentWants.clear()
return
let newWants = toAsk - peer.lastSentWants
if peer.lastSentWants.len > 0 and not skipDelta:
if newWants.len > 0:
trace "Sending delta want list update",
peer = peer.id, newWants = newWants.len, totalWants = toAsk.len
await self.sendBatchedWantList(peer, newWants.toSeq, full = false)
if resetBackoff:
peer.wantsUpdated
else:
trace "No changes in want list, skipping send", peer = peer.id
peer.lastSentWants = toAsk
else:
trace "Sending full want list", peer = peer.id, length = toAsk.len
await self.sendBatchedWantList(peer, toAsk.toSeq, full = true)
if resetBackoff:
peer.wantsUpdated
proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledError]).} =
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for peer in self.peers.peers.values.toSeq:
# We refresh block knowledge if:
# 1. the peer hasn't been refreshed in a while;
# 2. the list of blocks we care about has changed.
#
# Note that because of (2), it is important that we update our
# want list in the coarsest way possible instead of over many
# small updates.
#
# In dynamic swarms, staleness will dominate latency.
let
hasNewBlocks = peer.lastRefresh < self.pendingBlocks.lastInclusion
isKnowledgeStale = peer.isKnowledgeStale
if isKnowledgeStale or hasNewBlocks:
if not peer.refreshInProgress:
peer.refreshRequested()
await self.refreshBlockKnowledge(
peer, skipDelta = isKnowledgeStale, resetBackoff = hasNewBlocks
)
else:
trace "Not refreshing: peer is up to date", peer = peer.id
if (Moment.now() - lastIdle) >= runtimeQuota:
try:
await idleAsync()
except CancelledError:
discard
lastIdle = Moment.now()
proc searchForNewPeers(self: BlockExcEngine, cid: Cid) =
if self.lastDiscRequest + DiscoveryRateLimit < Moment.now():
trace "Searching for new peers for", cid = cid
codex_block_exchange_discovery_requests_total.inc()
self.lastDiscRequest = Moment.now() # always refresh before calling await!
self.discovery.queueFindBlocksReq(@[cid])
else:
trace "Not searching for new peers, rate limit not expired", cid = cid
proc evictPeer(self: BlockExcEngine, peer: PeerId) =
## Cleanup disconnected peer
##
trace "Evicting disconnected/departed peer", peer
let peerCtx = self.peers.get(peer)
if not peerCtx.isNil:
for address in peerCtx.blocksRequested:
self.pendingBlocks.clearRequest(address, peer.some)
# drop the peer from the peers table
self.peers.remove(peer)
proc downloadInternal( proc downloadInternal(
self: BlockExcEngine, address: BlockAddress self: BlockExcEngine, address: BlockAddress
@ -173,41 +340,147 @@ proc downloadInternal(
if self.pendingBlocks.retriesExhausted(address): if self.pendingBlocks.retriesExhausted(address):
trace "Error retries exhausted" trace "Error retries exhausted"
codex_block_exchange_requests_failed_total.inc()
handle.fail(newException(RetriesExhaustedError, "Error retries exhausted")) handle.fail(newException(RetriesExhaustedError, "Error retries exhausted"))
break break
trace "Running retry handle"
let peers = self.peers.getPeersForBlock(address) let peers = self.peers.getPeersForBlock(address)
logScope: logScope:
peersWith = peers.with.len peersWith = peers.with.len
peersWithout = peers.without.len peersWithout = peers.without.len
trace "Peers for block" if peers.with.len == 0:
if peers.with.len > 0: # We know of no peers that have the block.
self.pendingBlocks.setInFlight(address, true)
await self.sendWantBlock(@[address], peers.with.randomPeer)
else:
self.pendingBlocks.setInFlight(address, false)
if peers.without.len > 0: if peers.without.len > 0:
await self.sendWantHave(@[address], peers.without) # If we have peers connected but none of them have the block, this
self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) # could be because our knowledge about what they have has run stale.
# Tries to refresh it.
await self.refreshBlockKnowledge()
# Also tries to look for new peers for good measure.
# TODO: in the future, peer search and knowledge maintenance should
# be completely decoupled from one another. It is very hard to
# control what happens and how many neighbors we get like this.
self.searchForNewPeers(address.cidOrTreeCid)
await (handle or sleepAsync(self.pendingBlocks.retryInterval)) let nextDiscovery =
if self.lastDiscRequest + DiscoveryRateLimit > Moment.now():
(self.lastDiscRequest + DiscoveryRateLimit - Moment.now())
else:
0.milliseconds
let retryDelay =
max(secs(rand(self.pendingBlocks.retryInterval.secs)), nextDiscovery)
# We now wait for a bit and then retry. If the handle gets completed in the
# meantime (cause the presence handler might have requested the block and
# received it in the meantime), we are done. Retry delays are randomized
# so we don't get all block loops spinning at the same time.
await handle or sleepAsync(retryDelay)
if handle.finished:
break
# Without decrementing the retries count, this would infinitely loop
# trying to find peers.
self.pendingBlocks.decRetries(address)
# If we still don't have the block, we'll go for another cycle.
trace "No peers for block, will retry shortly"
continue
# Once again, it might happen that the block was requested to a peer
# in the meantime. If so, we don't need to do anything. Otherwise,
# we'll be the ones placing the request.
let scheduledPeer =
if not self.pendingBlocks.isRequested(address):
let peer = self.selectPeer(peers.with)
discard self.pendingBlocks.markRequested(address, peer.id)
peer.blockRequestScheduled(address)
trace "Request block from block retry loop"
await self.sendWantBlock(@[address], peer)
peer
else:
let peerId = self.pendingBlocks.getRequestPeer(address).get()
self.peers.get(peerId)
if scheduledPeer.isNil:
trace "Scheduled peer no longer available, clearing stale request", address
self.pendingBlocks.clearRequest(address)
continue
# Parks until either the block is received, or the peer times out.
let activityTimer = scheduledPeer.activityTimer()
await handle or activityTimer # TODO: or peerDropped
activityTimer.cancel()
# XXX: we should probably not have this. Blocks should be retried
# to infinity unless cancelled by the client.
self.pendingBlocks.decRetries(address) self.pendingBlocks.decRetries(address)
if handle.finished: if handle.finished:
trace "Handle for block finished", failed = handle.failed trace "Handle for block finished", failed = handle.failed
break break
else:
# If the peer timed out, retries immediately.
trace "Peer timed out during block request", peer = scheduledPeer.id
codex_block_exchange_peer_timeouts_total.inc()
await self.network.dropPeer(scheduledPeer.id)
# Evicts peer immediately or we may end up picking it again in the
# next retry.
self.evictPeer(scheduledPeer.id)
except CancelledError as exc: except CancelledError as exc:
trace "Block download cancelled" trace "Block download cancelled"
if not handle.finished: if not handle.finished:
await handle.cancelAndWait() await handle.cancelAndWait()
except RetriesExhaustedError as exc: except RetriesExhaustedError as exc:
warn "Retries exhausted for block", address, exc = exc.msg warn "Retries exhausted for block", address, exc = exc.msg
codex_block_exchange_requests_failed_total.inc()
if not handle.finished: if not handle.finished:
handle.fail(exc) handle.fail(exc)
finally: finally:
self.pendingBlocks.setInFlight(address, false) self.pendingBlocks.clearRequest(address)
proc requestBlocks*(
self: BlockExcEngine, addresses: seq[BlockAddress]
): SafeAsyncIter[Block] =
var handles: seq[BlockHandle]
# Adds all blocks to pendingBlocks before calling the first downloadInternal. This will
# ensure that we don't send incomplete want lists.
for address in addresses:
if address notin self.pendingBlocks:
handles.add(self.pendingBlocks.getWantHandle(address))
for address in addresses:
self.trackedFutures.track(self.downloadInternal(address))
let totalHandles = handles.len
var completed = 0
proc isFinished(): bool =
completed == totalHandles
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
# Be it success or failure, we're completing this future.
let value =
try:
# FIXME: this is super expensive. We're doing several linear scans,
# not to mention all the copying and callback fumbling in `one`.
let
handle = await one(handles)
i = handles.find(handle)
handles.del(i)
success await handle
except CancelledError as err:
warn "Block request cancelled", addresses, err = err.msg
raise err
except CatchableError as err:
error "Error getting blocks from exchange engine", addresses, err = err.msg
failure err
inc(completed)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
proc requestBlock*( proc requestBlock*(
self: BlockExcEngine, address: BlockAddress self: BlockExcEngine, address: BlockAddress
@ -230,63 +503,73 @@ proc requestBlock*(
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} = ): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
self.requestBlock(BlockAddress.init(cid)) self.requestBlock(BlockAddress.init(cid))
proc completeBlock*(self: BlockExcEngine, address: BlockAddress, blk: Block) =
if address in self.pendingBlocks.blocks:
self.pendingBlocks.completeWantHandle(address, blk)
else:
warn "Attempted to complete non-pending block", address
proc blockPresenceHandler*( proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async: (raises: []).} = ) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) trace "Received block presence from peer", peer, len = blocks.len
let let
peerCtx = self.peers.get(peer) peerCtx = self.peers.get(peer)
ourWantList = toSeq(self.pendingBlocks.wantList) ourWantList = toHashSet(self.pendingBlocks.wantList.toSeq)
if peerCtx.isNil: if peerCtx.isNil:
return return
peerCtx.refreshReplied()
for blk in blocks: for blk in blocks:
if presence =? Presence.init(blk): if presence =? Presence.init(blk):
peerCtx.setPresence(presence) peerCtx.setPresence(presence)
let let
peerHave = peerCtx.peerHave peerHave = peerCtx.peerHave
dontWantCids = peerHave.filterIt(it notin ourWantList) dontWantCids = peerHave - ourWantList
if dontWantCids.len > 0: if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids) peerCtx.cleanPresence(dontWantCids.toSeq)
let ourWantCids = ourWantList.filterIt( let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it) self.pendingBlocks.markRequested(it, peer)
) ).toSeq
for address in ourWantCids: for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address) self.pendingBlocks.decRetries(address)
peerCtx.blockRequestScheduled(address)
if ourWantCids.len > 0: if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids trace "Peer has blocks in our wantList", peer, wants = ourWantCids
# FIXME: this will result in duplicate requests for blocks
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption: if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg warn "Failed to send wantBlock to peer", peer, err = err.msg
for address in ourWantCids:
self.pendingBlocks.clearRequest(address, peer.some)
proc scheduleTasks( proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} = ) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to # schedule any new peers to provide blocks to
for p in self.peers: for p in self.peers:
for c in cids: # for each cid for blockDelivery in blocksDelivery: # for each cid
# schedule a peer if it wants at least one cid # schedule a peer if it wants at least one cid
# and we have it in our local store # and we have it in our local store
if c in p.peerWantsCids: if blockDelivery.address in p.wantedBlocks:
let cid = blockDelivery.blk.cid
try: try:
if await (c in self.localStore): if await (cid in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions # TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p) self.scheduleTask(p)
break break
except CancelledError as exc: except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg warn "Checking local store canceled", cid = cid, err = exc.msg
return return
except CatchableError as exc: except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg error "Error checking local store for cid", cid = cid, err = exc.msg
raiseAssert "Unexpected error checking local store for cid" raiseAssert "Unexpected error checking local store for cid"
proc cancelBlocks( proc cancelBlocks(
@ -295,28 +578,45 @@ proc cancelBlocks(
## Tells neighboring peers that we're no longer interested in a block. ## Tells neighboring peers that we're no longer interested in a block.
## ##
let blocksDelivered = toHashSet(addrs)
var scheduledCancellations: Table[PeerId, HashSet[BlockAddress]]
if self.peers.len == 0: if self.peers.len == 0:
return return
trace "Sending block request cancellations to peers", proc dispatchCancellations(
addrs, peers = self.peers.peerIds entry: tuple[peerId: PeerId, addresses: HashSet[BlockAddress]]
): Future[PeerId] {.async: (raises: [CancelledError]).} =
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = trace "Sending block request cancellations to peer",
peer = entry.peerId, addresses = entry.addresses.len
await self.network.request.sendWantCancellations( await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx) peer = entry.peerId, addresses = entry.addresses.toSeq
) )
return peerCtx return entry.peerId
try: try:
let (succeededFuts, failedFuts) = await allFinishedFailed[BlockExcPeerCtx]( for peerCtx in self.peers.peers.values:
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map( # Do we have pending requests, towards this peer, for any of the blocks
processPeer # that were just delivered?
) let intersection = peerCtx.blocksRequested.intersection(blocksDelivered)
if intersection.len > 0:
# If so, schedules a cancellation.
scheduledCancellations[peerCtx.id] = intersection
if scheduledCancellations.len == 0:
return
let (succeededFuts, failedFuts) = await allFinishedFailed[PeerId](
toSeq(scheduledCancellations.pairs).map(dispatchCancellations)
) )
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx): (await allFinished(succeededFuts)).mapIt(it.read).apply do(peerId: PeerId):
peerCtx.cleanPresence(addrs) let ctx = self.peers.get(peerId)
if not ctx.isNil:
ctx.cleanPresence(addrs)
for address in scheduledCancellations[peerId]:
ctx.blockRequestCancelled(address)
if failedFuts.len > 0: if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len warn "Failed to send block request cancellations to peers", peers = failedFuts.len
@ -386,17 +686,31 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
return success() return success()
proc blocksDeliveryHandler*( proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] self: BlockExcEngine,
peer: PeerId,
blocksDelivery: seq[BlockDelivery],
allowSpurious: bool = false,
) {.async: (raises: []).} = ) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery] var validatedBlocksDelivery: seq[BlockDelivery]
let peerCtx = self.peers.get(peer)
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for bd in blocksDelivery: for bd in blocksDelivery:
logScope: logScope:
peer = peer peer = peer
address = bd.address address = bd.address
try: try:
# Unknown peers and unrequested blocks are dropped with a warning.
if not allowSpurious and (peerCtx == nil or not peerCtx.blockReceived(bd.address)):
warn "Dropping unrequested or duplicate block received from peer"
codex_block_exchange_spurious_blocks_received.inc()
continue
if err =? self.validateBlockDelivery(bd).errorOption: if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg warn "Block validation failed", msg = err.msg
continue continue
@ -416,15 +730,25 @@ proc blocksDeliveryHandler*(
).errorOption: ).errorOption:
warn "Unable to store proof and cid for a block" warn "Unable to store proof and cid for a block"
continue continue
except CancelledError:
trace "Block delivery handling cancelled"
except CatchableError as exc: except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg warn "Error handling block delivery", error = exc.msg
continue continue
validatedBlocksDelivery.add(bd) validatedBlocksDelivery.add(bd)
if (Moment.now() - lastIdle) >= runtimeQuota:
try:
await idleAsync()
except CancelledError:
discard
except CatchableError:
discard
lastIdle = Moment.now()
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = self.peers.get(peer)
if peerCtx != nil: if peerCtx != nil:
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption: if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg warn "Error paying for blocks", err = err.msg
@ -448,16 +772,17 @@ proc wantListHandler*(
presence: seq[BlockPresence] presence: seq[BlockPresence]
schedulePeer = false schedulePeer = false
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
try: try:
for e in wantList.entries: for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope: logScope:
peer = peerCtx.id peer = peerCtx.id
address = e.address address = e.address
wantType = $e.wantType wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants if e.address notin peerCtx.wantedBlocks: # Adding new entry to peer wants
let let
have = have =
try: try:
@ -468,6 +793,8 @@ proc wantListHandler*(
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel: if e.cancel:
# This is sort of expected if we sent the block to the peer, as we have removed
# it from the peer's wantlist ourselves.
trace "Received cancelation for untracked block, skipping", trace "Received cancelation for untracked block, skipping",
address = e.address address = e.address
continue continue
@ -476,12 +803,14 @@ proc wantListHandler*(
case e.wantType case e.wantType
of WantType.WantHave: of WantType.WantHave:
if have: if have:
trace "We HAVE the block", address = e.address
presence.add( presence.add(
BlockPresence( BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price address: e.address, `type`: BlockPresenceType.Have, price: price
) )
) )
else: else:
trace "We DON'T HAVE the block", address = e.address
if e.sendDontHave: if e.sendDontHave:
presence.add( presence.add(
BlockPresence( BlockPresence(
@ -491,28 +820,35 @@ proc wantListHandler*(
codex_block_exchange_want_have_lists_received.inc() codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock: of WantType.WantBlock:
peerCtx.peerWants.add(e) peerCtx.wantedBlocks.incl(e.address)
schedulePeer = true schedulePeer = true
codex_block_exchange_want_block_lists_received.inc() codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants else: # Updating existing entry in peer wants
# peer doesn't want this block anymore # peer doesn't want this block anymore
if e.cancel: if e.cancel:
trace "Canceling want for block", address = e.address trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx) peerCtx.wantedBlocks.excl(e.address)
trace "Canceled block request", trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len address = e.address, len = peerCtx.wantedBlocks.len
else: else:
trace "Peer has requested a block more than once", address = e.address
if e.wantType == WantType.WantBlock: if e.wantType == WantType.WantBlock:
schedulePeer = true schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
if presence.len >= PresenceBatchSize or (Moment.now() - lastIdle) >= runtimeQuota:
if presence.len > 0:
trace "Sending presence batch to remote", items = presence.len
await self.network.request.sendPresence(peer, presence)
presence = @[]
try:
await idleAsync()
except CancelledError:
discard
lastIdle = Moment.now()
# Send any remaining presence messages
if presence.len > 0: if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",") trace "Sending final presence to remote", items = presence.len
await self.network.request.sendPresence(peer, presence) await self.network.request.sendPresence(peer, presence)
if schedulePeer: if schedulePeer:
@ -544,7 +880,7 @@ proc paymentHandler*(
else: else:
context.paymentChannel = self.wallet.acceptChannel(payment).option context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*( proc peerAddedHandler*(
self: BlockExcEngine, peer: PeerId self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} = ) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want ## Perform initial setup, such as want
@ -554,88 +890,85 @@ proc setupPeer*(
trace "Setting up peer", peer trace "Setting up peer", peer
if peer notin self.peers: if peer notin self.peers:
let peerCtx = BlockExcPeerCtx(id: peer, activityTimeout: DefaultPeerActivityTimeout)
trace "Setting up new peer", peer trace "Setting up new peer", peer
self.peers.add(BlockExcPeerCtx(id: peer)) self.peers.add(peerCtx)
trace "Added peer", peers = self.peers.len trace "Added peer", peers = self.peers.len
await self.refreshBlockKnowledge(peerCtx)
# broadcast our want list, the other peer will do the same
if self.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(self.pendingBlocks.wantList)
await self.network.request.sendWantList(peer, cids, full = true)
if address =? self.pricing .? address: if address =? self.pricing .? address:
trace "Sending account to peer", peer trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address)) await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} = proc localLookup(
## Cleanup disconnected peer self: BlockExcEngine, address: BlockAddress
## ): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} =
if address.leaf:
(await self.localStore.getBlockAndProof(address.treeCid, address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(address: address, blk: blkAndProof[0], proof: blkAndProof[1].some)
)
else:
(await self.localStore.getBlock(address)).map(
(blk: Block) => BlockDelivery(address: address, blk: blk, proof: CodexProof.none)
)
trace "Dropping peer", peer iterator splitBatches[T](sequence: seq[T], batchSize: int): seq[T] =
var batch: seq[T]
for element in sequence:
if batch.len == batchSize:
yield batch
batch = @[]
batch.add(element)
# drop the peer from the peers table if batch.len > 0:
self.peers.remove(peer) yield batch
proc taskHandler*( proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx self: BlockExcEngine, peerCtx: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} = ) {.async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send to the peer blocks he wants to get, # Send to the peer blocks he wants to get,
# if they present in our local store # if they present in our local store
# TODO: There should be all sorts of accounting of # Blocks that have been sent have already been picked up by other tasks and
# bytes sent/received here # should not be re-sent.
var
wantedBlocks = peerCtx.wantedBlocks.filterIt(not peerCtx.isBlockSent(it))
sent: HashSet[BlockAddress]
var wantsBlocks = trace "Running task for peer", peer = peerCtx.id
task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) = for wantedBlock in wantedBlocks:
for peerWant in task.peerWants.mitems: peerCtx.markBlockAsSent(wantedBlock)
if peerWant.address in addresses:
peerWant.inFlight = inFlight
if wantsBlocks.len > 0: try:
# Mark wants as in-flight. for batch in wantedBlocks.toSeq.splitBatches(self.maxBlocksPerMessage):
let wantAddresses = wantsBlocks.mapIt(it.address) var blockDeliveries: seq[BlockDelivery]
updateInFlight(wantAddresses, true) for wantedBlock in batch:
wantsBlocks.sort(SortOrder.Descending) # I/O is blocking so looking up blocks sequentially is fine.
without blockDelivery =? await self.localLookup(wantedBlock), err:
error "Error getting block from local store",
err = err.msg, address = wantedBlock
peerCtx.markBlockAsNotSent(wantedBlock)
continue
blockDeliveries.add(blockDelivery)
sent.incl(wantedBlock)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} = if blockDeliveries.len == 0:
if e.address.leaf: continue
(await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
)
)
else:
(await self.localStore.getBlock(e.address)).map(
(blk: Block) =>
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
let await self.network.request.sendBlocksDelivery(peerCtx.id, blockDeliveries)
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) codex_block_exchange_blocks_sent.inc(blockDeliveries.len.int64)
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt: # Drops the batch from the peer's set of wanted blocks; i.e. assumes that after
if bd =? it.value: # we send the blocks, then the peer no longer wants them, so we don't need to
bd # re-send them. Note that the send might still fail down the line and we will
else: # have removed those anyway. At that point, we rely on the requester performing
raiseAssert "Unexpected error in local lookup" # a retry for the request to succeed.
peerCtx.wantedBlocks.keepItIf(it notin sent)
# All the wants that failed local lookup must be set to not-in-flight again. finally:
let # Better safe than sorry: if an exception does happen, we don't want to keep
successAddresses = blocksDelivery.mapIt(it.address) # those as sent, as it'll effectively prevent the blocks from ever being sent again.
failedAddresses = wantAddresses.filterIt(it notin successAddresses) peerCtx.blocksSent.keepItIf(it notin wantedBlocks)
updateInFlight(failedAddresses, false)
if blocksDelivery.len > 0:
trace "Sending blocks to peer",
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
task.peerWants.keepItIf(it.address notin successAddresses)
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
## process tasks ## process tasks
@ -646,11 +979,47 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
while self.blockexcRunning: while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop() let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx) await self.taskHandler(peerCtx)
except CancelledError:
trace "block exchange task runner cancelled"
except CatchableError as exc: except CatchableError as exc:
error "error running block exchange task", error = exc.msg error "error running block exchange task", error = exc.msg
info "Exiting blockexc task runner" info "Exiting blockexc task runner"
proc selectRandom*(
peers: seq[BlockExcPeerCtx]
): BlockExcPeerCtx {.gcsafe, raises: [].} =
if peers.len == 1:
return peers[0]
proc evalPeerScore(peer: BlockExcPeerCtx): float =
let
loadPenalty = peer.blocksRequested.len.float * 2.0
successRate =
if peer.exchanged > 0:
peer.exchanged.float / (peer.exchanged + peer.blocksRequested.len).float
else:
0.5
failurePenalty = (1.0 - successRate) * 5.0
return loadPenalty + failurePenalty
let
scores = peers.mapIt(evalPeerScore(it))
maxScore = scores.max() + 1.0
weights = scores.mapIt(maxScore - it)
var totalWeight = 0.0
for w in weights:
totalWeight += w
var r = rand(totalWeight)
for i, weight in weights:
r -= weight
if r <= 0.0:
return peers[i]
return peers[^1]
proc new*( proc new*(
T: type BlockExcEngine, T: type BlockExcEngine,
localStore: BlockStore, localStore: BlockStore,
@ -660,7 +1029,9 @@ proc new*(
advertiser: Advertiser, advertiser: Advertiser,
peerStore: PeerCtxStore, peerStore: PeerCtxStore,
pendingBlocks: PendingBlocksManager, pendingBlocks: PendingBlocksManager,
maxBlocksPerMessage = DefaultMaxBlocksPerMessage,
concurrentTasks = DefaultConcurrentTasks, concurrentTasks = DefaultConcurrentTasks,
selectPeer: PeerSelector = selectRandom,
): BlockExcEngine = ): BlockExcEngine =
## Create new block exchange engine instance ## Create new block exchange engine instance
## ##
@ -673,23 +1044,13 @@ proc new*(
wallet: wallet, wallet: wallet,
concurrentTasks: concurrentTasks, concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures(), trackedFutures: TrackedFutures(),
maxBlocksPerMessage: maxBlocksPerMessage,
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery, discovery: discovery,
advertiser: advertiser, advertiser: advertiser,
selectPeer: selectPeer,
) )
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId)
else:
self.dropPeer(peerId)
if not isNil(network.switch):
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler( proc blockWantListHandler(
peer: PeerId, wantList: WantList peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} = ): Future[void] {.async: (raises: []).} =
@ -715,12 +1076,24 @@ proc new*(
): Future[void] {.async: (raises: []).} = ): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment) self.paymentHandler(peer, payment)
proc peerAddedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
await self.peerAddedHandler(peer)
proc peerDepartedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
self.evictPeer(peer)
network.handlers = BlockExcHandlers( network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler, onWantList: blockWantListHandler,
onBlocksDelivery: blocksDeliveryHandler, onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler, onPresence: blockPresenceHandler,
onAccount: accountHandler, onAccount: accountHandler,
onPayment: paymentHandler, onPayment: paymentHandler,
onPeerJoined: peerAddedHandler,
onPeerDeparted: peerDepartedHandler,
) )
return self return self

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -34,7 +34,7 @@ declareGauge(
const const
DefaultBlockRetries* = 3000 DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis DefaultRetryInterval* = 2.seconds
type type
RetriesExhaustedError* = object of CatchableError RetriesExhaustedError* = object of CatchableError
@ -42,7 +42,7 @@ type
BlockReq* = object BlockReq* = object
handle*: BlockHandle handle*: BlockHandle
inFlight*: bool requested*: ?PeerId
blockRetries*: int blockRetries*: int
startTime*: int64 startTime*: int64
@ -50,12 +50,13 @@ type
blockRetries*: int = DefaultBlockRetries blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests blocks*: Table[BlockAddress, BlockReq] # pending Block requests
lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) = proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*( proc getWantHandle*(
self: PendingBlocksManager, address: BlockAddress, inFlight = false self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block ## Add an event for a block
## ##
@ -65,11 +66,13 @@ proc getWantHandle*(
do: do:
let blk = BlockReq( let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"), handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight, requested: requested,
blockRetries: self.blockRetries, blockRetries: self.blockRetries,
startTime: getMonoTime().ticks, startTime: getMonoTime().ticks,
) )
self.blocks[address] = blk self.blocks[address] = blk
self.lastInclusion = Moment.now()
let handle = blk.handle let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} = proc cleanUpBlock(data: pointer) {.raises: [].} =
@ -86,9 +89,22 @@ proc getWantHandle*(
return handle return handle
proc getWantHandle*( proc getWantHandle*(
self: PendingBlocksManager, cid: Cid, inFlight = false self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight) self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
## Complete a pending want handle
self.blocks.withValue(address, blockReq):
if not blockReq[].handle.finished:
trace "Completing want handle from provided block", address
blockReq[].handle.complete(blk)
else:
trace "Want handle already completed", address
do:
trace "No pending want handle found for address", address
proc resolve*( proc resolve*(
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
@ -108,9 +124,6 @@ proc resolve*(
blockReq.handle.complete(bd.blk) blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs) codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else: else:
trace "Block handle already finished", address = bd.address trace "Block handle already finished", address = bd.address
@ -128,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
self.blocks.withValue(address, pending): self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0 result = pending[].blockRetries <= 0
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) = func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
## Set inflight status for a block ## Check if a block has been requested to a peer
##
result = false
self.blocks.withValue(address, pending):
result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
## Returns the peer that requested this block
##
result = PeerId.none
self.blocks.withValue(address, pending):
result = pending[].requested
proc markRequested*(
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
## Marks this block as having been requested to a peer
## ##
self.blocks.withValue(address, pending): if self.isRequested(address):
pending[].inFlight = inFlight return false
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##
self.blocks.withValue(address, pending): self.blocks.withValue(address, pending):
result = pending[].inFlight pending[].requested = peer.some
return true
proc clearRequest*(
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
self.blocks.withValue(address, pending):
if peer.isSome:
assert peer == pending[].requested
pending[].requested = PeerId.none
func contains*(self: PendingBlocksManager, cid: Cid): bool = func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks BlockAddress.init(cid) in self.blocks

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -35,15 +35,14 @@ const
DefaultMaxInflight* = 100 DefaultMaxInflight* = 100
type type
WantListHandler* = WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
BlocksDeliveryHandler* = BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).} proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* = BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).} proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).} AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
PaymentHandler* = PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).} PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object BlockExcHandlers* = object
onWantList*: WantListHandler onWantList*: WantListHandler
@ -51,6 +50,9 @@ type
onPresence*: BlockPresenceHandler onPresence*: BlockPresenceHandler
onAccount*: AccountHandler onAccount*: AccountHandler
onPayment*: PaymentHandler onPayment*: PaymentHandler
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc( WantListSender* = proc(
id: PeerId, id: PeerId,
@ -240,96 +242,116 @@ proc handlePayment(
await network.handlers.onPayment(peer.id, payment) await network.handlers.onPayment(peer.id, payment)
proc rpcHandler( proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} = ) {.async: (raises: []).} =
## handle rpc messages ## handle rpc messages
## ##
if msg.wantList.entries.len > 0: if msg.wantList.entries.len > 0:
b.trackedFutures.track(b.handleWantList(peer, msg.wantList)) self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0: if msg.payload.len > 0:
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload)) self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0: if msg.blockPresences.len > 0:
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences)) self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account): if account =? Account.init(msg.account):
b.trackedFutures.track(b.handleAccount(peer, account)) self.trackedFutures.track(self.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment): if payment =? SignedState.init(msg.payment):
b.trackedFutures.track(b.handlePayment(peer, payment)) self.trackedFutures.track(self.handlePayment(peer, payment))
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer ## Creates or retrieves a BlockExcNetwork Peer
## ##
if peer in b.peers: if peer in self.peers:
return b.peers.getOrDefault(peer, nil) return self.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {. var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError]) async: (raises: [CancelledError])
.} = .} =
try: try:
trace "Getting new connection stream", peer trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec) return await self.switch.dial(peer, Codec)
except CancelledError as error: except CancelledError as error:
raise error raise error
except CatchableError as exc: except CatchableError as exc:
trace "Unable to connect to blockexc peer", exc = exc.msg trace "Unable to connect to blockexc peer", exc = exc.msg
if not isNil(b.getConn): if not isNil(self.getConn):
getConn = b.getConn getConn = self.getConn
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} = let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg) await self.rpcHandler(p, msg)
# create new pubsub peer # create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler) let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
debug "Created new blockexc peer", peer debug "Created new blockexc peer", peer
b.peers[peer] = blockExcPeer self.peers[peer] = blockExcPeer
return blockExcPeer return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) = proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Perform initial setup, such as want
## list exchange
##
discard b.getOrCreatePeer(peer)
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer ## Dial a peer
## ##
if b.isSelf(peer.peerId): if self.isSelf(peer.peerId):
trace "Skipping dialing self", peer = peer.peerId trace "Skipping dialing self", peer = peer.peerId
return return
if peer.peerId in b.peers: if peer.peerId in self.peers:
trace "Already connected to peer", peer = peer.peerId trace "Already connected to peer", peer = peer.peerId
return return
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) = proc dropPeer*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
trace "Dropping peer", peer
try:
if not self.switch.isNil:
await self.switch.disconnect(peer)
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
discard self.getOrCreatePeer(peer)
if not self.handlers.onPeerJoined.isNil:
await self.handlers.onPeerJoined(peer)
proc handlePeerDeparted*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Cleanup disconnected peer ## Cleanup disconnected peer
## ##
trace "Dropping peer", peer trace "Cleaning up departed peer", peer
b.peers.del(peer) self.peers.del(peer)
if not self.handlers.onPeerDeparted.isNil:
await self.handlers.onPeerDeparted(peer)
method init*(self: BlockExcNetwork) = method init*(self: BlockExcNetwork) {.raises: [].} =
## Perform protocol initialization ## Perform protocol initialization
## ##
proc peerEventHandler( proc peerEventHandler(
peerId: PeerId, event: PeerEvent peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = ): Future[void] {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined: if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId) await self.handlePeerJoined(peerId)
elif event.kind == PeerEventKind.Left:
await self.handlePeerDeparted(peerId)
else: else:
self.dropPeer(peerId) warn "Unknown peer event", event
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -24,10 +24,9 @@ logScope:
const DefaultYieldInterval = 50.millis const DefaultYieldInterval = 50.millis
type type
ConnProvider* = ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
NetworkPeer* = ref object of RootObj NetworkPeer* = ref object of RootObj
id*: PeerId id*: PeerId
@ -65,7 +64,9 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
except CatchableError as err: except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg warn "Exception in blockexc read loop", msg = err.msg
finally: finally:
trace "Detaching read loop", peer = self.id, connId = conn.oid warn "Detaching read loop", peer = self.id, connId = conn.oid
if self.sendConn == conn:
self.sendConn = nil
await conn.close() await conn.close()
proc connect*( proc connect*(
@ -89,7 +90,12 @@ proc send*(
return return
trace "Sending message", peer = self.id, connId = conn.oid trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg)) try:
await conn.writeLp(protobufEncode(msg))
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
func new*( func new*(
T: type NetworkPeer, T: type NetworkPeer,

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -25,28 +25,77 @@ import ../../logutils
export payments, nitro export payments, nitro
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
type BlockExcPeerCtx* = ref object of RootObj type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
account*: ?Account # ethereum account of this peer account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id paymentChannel*: ?ChannelId # payment channel id
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] = proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
toSeq(self.blocks.keys) let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] = if staleness and self.refreshInProgress:
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] = staleness
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly an inefficient, but since those will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool = proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) = func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
self.blocks[presence.address] = presence self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) = func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
price += precense[].price price += precense[].price
price price
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block the set of blocks that have been requested to this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested to this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -62,21 +62,23 @@ func len*(self: PeerCtxStore): int =
self.peers.len self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address)) toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid)) toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address)) toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) # FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[]) var res: PeersForBlock = (@[], @[])
for peer in self: for peer in self:
if peer.peerHave.anyIt(it == address): if address in peer:
res.with.add(peer) res.with.add(peer)
else: else:
res.without.add(peer) res.without.add(peer)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,7 +9,6 @@
import std/hashes import std/hashes
import std/sequtils import std/sequtils
import pkg/stew/endians2
import message import message
@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash = proc hash*(e: WantListEntry): Hash =
hash(e.address) hash(e.address)

View File

@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes # Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages. # and Protobuf encoder/decoder for these messages.
# #
# Eventually all this code should be auto-generated from message.proto. # Eventually all this code should be auto-generated from message.proto.
@ -25,11 +25,15 @@ type
WantListEntry* = object WantListEntry* = object
address*: BlockAddress address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process those in the same order as we send them back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). default to 1 priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries entries*: seq[WantListEntry] # A list of wantList entries

View File

@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes. // Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md // Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3"; syntax = "proto3";

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,16 +9,14 @@
import std/tables import std/tables
import std/sugar import std/sugar
import std/hashes
export tables export tables
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p/[cid, multicodec, multihash] import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils import pkg/stew/[byteutils, endians2]
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
else: else:
"cid: " & $a.cid "cid: " & $a.cid
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid = proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid if a.leaf: a.treeCid else: a.cid

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# TODO: This is super inneficient and needs a rewrite, but it'll do for now # TODO: This is super inneficient and needs a rewrite, but it'll do for now
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -31,7 +28,7 @@ type
ChunkerError* = object of CatchableError ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte] ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {. Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError]) async: (raises: [ChunkerError, CancelledError])
.} .}
# Reader that splits input data into fixed-size chunks # Reader that splits input data into fixed-size chunks
@ -77,7 +74,7 @@ proc new*(
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var res = 0 var res = 0
try: try:
while res < len: while res < len:
@ -105,7 +102,7 @@ proc new*(
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var total = 0 var total = 0
try: try:
while total < len: while total < len:

View File

@ -1,6 +1,7 @@
{.push raises: [].}
import pkg/chronos import pkg/chronos
import pkg/stew/endians2 import pkg/stew/endians2
import pkg/upraises
import pkg/stint import pkg/stint
type type
@ -8,10 +9,12 @@ type
SecondsSince1970* = int64 SecondsSince1970* = int64
Timeout* = object of CatchableError Timeout* = object of CatchableError
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} = method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
raiseAssert "not implemented" raiseAssert "not implemented"
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} = method waitUntil*(
clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
raiseAssert "not implemented" raiseAssert "not implemented"
method start*(clock: Clock) {.base, async.} = method start*(clock: Clock) {.base, async.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,6 +12,7 @@ import std/strutils
import std/os import std/os
import std/tables import std/tables
import std/cpuinfo import std/cpuinfo
import std/net
import pkg/chronos import pkg/chronos
import pkg/taskpools import pkg/taskpools
@ -21,7 +22,6 @@ import pkg/confutils
import pkg/confutils/defs import pkg/confutils/defs
import pkg/nitro import pkg/nitro
import pkg/stew/io2 import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore import pkg/datastore
import pkg/ethers except Rng import pkg/ethers except Rng
import pkg/stew/io2 import pkg/stew/io2
@ -56,10 +56,21 @@ type
codexNode: CodexNodeRef codexNode: CodexNodeRef
repoStore: RepoStore repoStore: RepoStore
maintenance: BlockMaintainer maintenance: BlockMaintainer
taskpool: Taskpool
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet EthWallet = ethers.Wallet
func config*(self: CodexServer): CodexConf =
return self.config
func node*(self: CodexServer): CodexNodeRef =
return self.codexNode
func repoStore*(self: CodexServer): RepoStore =
return self.repoStore
proc waitForSync(provider: Provider): Future[void] {.async.} = proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1 var sleepTime = 1
trace "Checking sync state of Ethereum provider..." trace "Checking sync state of Ethereum provider..."
@ -127,7 +138,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
# This is used for simulation purposes. Normal nodes won't be compiled with this flag # This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0. # and hence the proof failure will always be 0.
when codex_enable_proof_failures: when storage_enable_proof_failures:
let proofFailures = config.simulateProofFailures let proofFailures = config.simulateProofFailures
if proofFailures > 0: if proofFailures > 0:
warn "Enabling proof failure simulation!" warn "Enabling proof failure simulation!"
@ -158,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
s.codexNode.contracts = (client, host, validator) s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} = proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config if s.isStarted:
warn "Storage server already started, skipping"
return
trace "Starting Storage node", config = $s.config
await s.repoStore.start() await s.repoStore.start()
s.maintenance.start() s.maintenance.start()
await s.codexNode.switch.start() await s.codexNode.switch.start()
@ -174,24 +189,55 @@ proc start*(s: CodexServer) {.async.} =
await s.bootstrapInteractions() await s.bootstrapInteractions()
await s.codexNode.start() await s.codexNode.start()
s.restServer.start()
if s.restServer != nil:
s.restServer.start()
s.isStarted = true
proc stop*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node" if not s.isStarted:
warn "Storage is not started"
return
let res = await noCancel allFinishedFailed[void]( notice "Stopping Storage node"
var futures =
@[ @[
s.restServer.stop(),
s.codexNode.switch.stop(), s.codexNode.switch.stop(),
s.codexNode.stop(), s.codexNode.stop(),
s.repoStore.stop(), s.repoStore.stop(),
s.maintenance.stop(), s.maintenance.stop(),
] ]
)
if s.restServer != nil:
futures.add(s.restServer.stop())
let res = await noCancel allFinishedFailed[void](futures)
if res.failure.len > 0: if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop codex node" raiseAssert "Failed to stop Storage node"
proc close*(s: CodexServer) {.async.} =
var futures = @[s.codexNode.close(), s.repoStore.close()]
let res = await noCancel allFinishedFailed[void](futures)
if not s.taskpool.isNil:
try:
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
proc shutdown*(server: CodexServer) {.async.} =
await server.stop()
await server.close()
proc new*( proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
@ -207,7 +253,7 @@ proc new*(
.withMaxConnections(config.maxPeers) .withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString) .withAgentVersion(config.agentString)
.withSignedPeerRecord(true) .withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr}) .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
.build() .build()
var var
@ -291,7 +337,7 @@ proc new*(
) )
peerStore = PeerCtxStore.new() peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new() pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery) advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery = blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
@ -316,10 +362,13 @@ proc new*(
taskPool = taskpool, taskPool = taskpool,
) )
var restServer: RestServerRef = nil
if config.apiBindAddress.isSome:
restServer = RestServerRef restServer = RestServerRef
.new( .new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort), initTAddress(config.apiBindAddress.get(), config.apiPort),
bufferSize = (1024 * 64), bufferSize = (1024 * 64),
maxRequestBodySize = int.high, maxRequestBodySize = int.high,
) )
@ -333,4 +382,5 @@ proc new*(
restServer: restServer, restServer: restServer,
repoStore: repoStore, repoStore: repoStore,
maintenance: maintenance, maintenance: maintenance,
taskpool: taskpool,
) )

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
{.pop.} {.pop.}
import std/options import std/options
import std/parseutils
import std/strutils import std/strutils
import std/typetraits import std/typetraits
import std/net
import pkg/chronos import pkg/chronos
import pkg/chronicles/helpers import pkg/chronicles/helpers
@ -27,13 +29,12 @@ import pkg/confutils/std/net
import pkg/toml_serialization import pkg/toml_serialization
import pkg/metrics import pkg/metrics
import pkg/metrics/chronos_httpserver import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils import pkg/stew/byteutils
import pkg/libp2p import pkg/libp2p
import pkg/ethers import pkg/ethers
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/base64
import ./codextypes import ./codextypes
import ./discovery import ./discovery
@ -46,13 +47,14 @@ import ./utils/natutils
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots export ValidationGroups, MaxSlots
export export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
type ThreadCount* = distinct Natural type ThreadCount* = distinct Natural
@ -61,21 +63,19 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string = proc defaultDataDir*(): string =
let dataDir = let dataDir =
when defined(windows): when defined(windows):
"AppData" / "Roaming" / "Codex" "AppData" / "Roaming" / "Storage"
elif defined(macosx): elif defined(macosx):
"Library" / "Application Support" / "Codex" "Library" / "Application Support" / "Storage"
else: else:
".cache" / "codex" ".cache" / "storage"
getHomeDir() / dataDir getHomeDir() / dataDir
const const
codex_enable_api_debug_peers* {.booldefine.} = false storage_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false storage_enable_proof_failures* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false storage_enable_log_counter* {.booldefine.} = false
DefaultDataDir* = defaultDataDir()
DefaultCircuitDir* = defaultDataDir() / "circuits"
DefaultThreadCount* = ThreadCount(0) DefaultThreadCount* = ThreadCount(0)
type type
@ -137,9 +137,9 @@ type
.}: Port .}: Port
dataDir* {. dataDir* {.
desc: "The directory where codex will store configuration and data", desc: "The directory where Storage will store configuration and data",
defaultValue: DefaultDataDir, defaultValue: defaultDataDir(),
defaultValueDesc: $DefaultDataDir, defaultValueDesc: "",
abbr: "d", abbr: "d",
name: "data-dir" name: "data-dir"
.}: OutDir .}: OutDir
@ -198,14 +198,16 @@ type
.}: ThreadCount .}: ThreadCount
agentString* {. agentString* {.
defaultValue: "Codex", defaultValue: "Logos Storage",
desc: "Node agent string which is used as identifier in network", desc: "Node agent string which is used as identifier in network",
name: "agent-string" name: "agent-string"
.}: string .}: string
apiBindAddress* {. apiBindAddress* {.
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr" desc: "The REST API bind address",
.}: string defaultValue: "127.0.0.1".some,
name: "api-bindaddr"
.}: Option[string]
apiPort* {. apiPort* {.
desc: "The REST Api port", desc: "The REST Api port",
@ -263,6 +265,13 @@ type
name: "block-mn" name: "block-mn"
.}: int .}: int
blockRetries* {.
desc: "Number of times to retry fetching a block before giving up",
defaultValue: DefaultBlockRetries,
defaultValueDesc: $DefaultBlockRetries,
name: "block-retries"
.}: int
cacheSize* {. cacheSize* {.
desc: desc:
"The size of the block cache, 0 disables the cache - " & "The size of the block cache, 0 disables the cache - " &
@ -382,31 +391,31 @@ type
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover: of PersistenceCmd.prover:
circuitDir* {. circuitDir* {.
desc: "Directory where Codex will store proof circuit data", desc: "Directory where Storage will store proof circuit data",
defaultValue: DefaultCircuitDir, defaultValue: defaultDataDir() / "circuits",
defaultValueDesc: $DefaultCircuitDir, defaultValueDesc: "data/circuits",
abbr: "cd", abbr: "cd",
name: "circuit-dir" name: "circuit-dir"
.}: OutDir .}: OutDir
circomR1cs* {. circomR1cs* {.
desc: "The r1cs file for the storage circuit", desc: "The r1cs file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.r1cs", defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs", defaultValueDesc: "data/circuits/proof_main.r1cs",
name: "circom-r1cs" name: "circom-r1cs"
.}: InputFile .}: InputFile
circomWasm* {. circomWasm* {.
desc: "The wasm file for the storage circuit", desc: "The wasm file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.wasm", defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm", defaultValueDesc: "data/circuits/proof_main.wasm",
name: "circom-wasm" name: "circom-wasm"
.}: InputFile .}: InputFile
circomZkey* {. circomZkey* {.
desc: "The zkey file for the storage circuit", desc: "The zkey file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.zkey", defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey", defaultValueDesc: "data/circuits/proof_main.zkey",
name: "circom-zkey" name: "circom-zkey"
.}: InputFile .}: InputFile
@ -476,7 +485,7 @@ func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string = proc getCodexVersion(): string =
let tag = strip(staticExec("git tag")) let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace: if tag.isEmptyOrWhitespace:
return "untagged build" return "untagged build"
return tag return tag
@ -487,7 +496,8 @@ proc getCodexRevision(): string =
return res return res
proc getCodexContractsRevision(): string = proc getCodexContractsRevision(): string =
let res = strip(staticExec("git rev-parse --short HEAD:vendor/codex-contracts-eth")) let res =
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
return res return res
proc getNimBanner(): string = proc getNimBanner(): string =
@ -500,67 +510,85 @@ const
nimBanner* = getNimBanner() nimBanner* = getNimBanner()
codexFullVersion* = codexFullVersion* =
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" & "Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"Codex contracts revision: " & codexContractsRevision & "\p" & nimBanner "\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
proc parseCmdArg*( proc parseCmdArg*(
T: typedesc[MultiAddress], input: string T: typedesc[MultiAddress], input: string
): MultiAddress {.upraises: [ValueError].} = ): MultiAddress {.raises: [ValueError].} =
var ma: MultiAddress var ma: MultiAddress
try: try:
let res = MultiAddress.init(input) let res = MultiAddress.init(input)
if res.isOk: if res.isOk:
ma = res.get() ma = res.get()
else: else:
warn "Invalid MultiAddress", input = input, error = res.error() fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure quit QuitFailure
except LPError as exc: except LPError as exc:
warn "Invalid MultiAddress uri", uri = input, error = exc.msg fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
quit QuitFailure quit QuitFailure
ma ma
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
let count = parseInt(input) try:
if count != 0 and count < 2: let count = parseInt(p)
warn "Invalid number of threads", input = input if count != 0 and count < 2:
quit QuitFailure return err("Invalid number of threads: " & p)
ThreadCount(count) return ok(ThreadCount(count))
except ValueError as e:
return err("Invalid number of threads: " & p & ", error=" & e.msg)
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = proc parseCmdArg*(T: type ThreadCount, input: string): T =
let val = ThreadCount.parse(input)
if val.isErr:
fatal "Cannot parse the thread count.", input = input, error = val.error()
quit QuitFailure
return val.get()
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
var res: SignedPeerRecord var res: SignedPeerRecord
try: try:
if not res.fromURI(uri): if not res.fromURI(p):
warn "Invalid SignedPeerRecord uri", uri = uri return err("The uri is not a valid SignedPeerRecord: " & p)
quit QuitFailure return ok(res)
except LPError as exc: except LPError, Base64Error:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg let e = getCurrentException()
quit QuitFailure return err(e.msg)
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
res
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} = proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
let res = SignedPeerRecord.parse(uri)
if res.isErr:
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
quit QuitFailure
return res.get()
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
case p.toLowerAscii case p.toLowerAscii
of "any": of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
of "none": of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
of "upnp": of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
of "pmp": of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
else: else:
if p.startsWith("extip:"): if p.startsWith("extip:"):
try: try:
let ip = parseIpAddress(p[6 ..^ 1]) let ip = parseIpAddress(p[6 ..^ 1])
NatConfig(hasExtIp: true, extIp: ip) return ok(NatConfig(hasExtIp: true, extIp: ip))
except ValueError: except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1] let error = "Not a valid IP address: " & p[6 ..^ 1]
raise newException(ValueError, error) return err(error)
else: else:
let error = "Not a valid NAT option: " & p return err("Not a valid NAT option: " & p)
raise newException(ValueError, error)
proc parseCmdArg*(T: type NatConfig, p: string): T =
let res = NatConfig.parse(p)
if res.isErr:
fatal "Cannot parse the NAT config.", error = res.error(), input = p
quit QuitFailure
return res.get()
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] = proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[] return @[]
@ -568,25 +596,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
proc parseCmdArg*(T: type EthAddress, address: string): T = proc parseCmdArg*(T: type EthAddress, address: string): T =
EthAddress.init($address).get() EthAddress.init($address).get()
proc parseCmdArg*(T: type NBytes, val: string): T = func parse*(T: type NBytes, p: string): Result[NBytes, string] =
var num = 0'i64 var num = 0'i64
let count = parseSize(val, num, alwaysBin = true) let count = parseSize(p, num, alwaysBin = true)
if count == 0: if count == 0:
warn "Invalid number of bytes", nbytes = val return err("Invalid number of bytes: " & p)
return ok(NBytes(num))
proc parseCmdArg*(T: type NBytes, val: string): T =
let res = NBytes.parse(val)
if res.isErr:
fatal "Cannot parse NBytes.", error = res.error(), input = val
quit QuitFailure quit QuitFailure
NBytes(num) return res.get()
proc parseCmdArg*(T: type Duration, val: string): T = proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration var dur: Duration
let count = parseDuration(val, dur) let count = parseDuration(val, dur)
if count == 0: if count == 0:
warn "Cannot parse duration", dur = dur fatal "Cannot parse duration", dur = dur
quit QuitFailure quit QuitFailure
dur dur
proc readValue*( proc readValue*(
r: var TomlReader, val: var EthAddress r: var TomlReader, val: var EthAddress
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get() val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@ -597,7 +631,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
try: try:
val = SignedPeerRecord.parseCmdArg(uri) val = SignedPeerRecord.parseCmdArg(uri)
except LPError as err: except LPError as err:
warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
quit QuitFailure quit QuitFailure
proc readValue*(r: var TomlReader, val: var MultiAddress) = proc readValue*(r: var TomlReader, val: var MultiAddress) =
@ -609,12 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk: if res.isOk:
val = res.get() val = res.get()
else: else:
warn "Invalid MultiAddress", input = input, error = res.error() fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure quit QuitFailure
proc readValue*( proc readValue*(
r: var TomlReader, val: var NBytes r: var TomlReader, val: var NBytes
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
var value = 0'i64 var value = 0'i64
var str = r.readValue(string) var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true) let count = parseSize(str, value, alwaysBin = true)
@ -625,7 +659,7 @@ proc readValue*(
proc readValue*( proc readValue*(
r: var TomlReader, val: var ThreadCount r: var TomlReader, val: var ThreadCount
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string) var str = r.readValue(string)
try: try:
val = parseCmdArg(ThreadCount, str) val = parseCmdArg(ThreadCount, str)
@ -634,7 +668,7 @@ proc readValue*(
proc readValue*( proc readValue*(
r: var TomlReader, val: var Duration r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} = ) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string) var str = r.readValue(string)
var dur: Duration var dur: Duration
let count = parseDuration(str, dur) let count = parseDuration(str, dur)
@ -701,7 +735,7 @@ proc stripAnsi*(v: string): string =
res res
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
# Updates log levels (without clearing old ones) # Updates log levels (without clearing old ones)
let directives = logLevel.split(";") let directives = logLevel.split(";")
try: try:
@ -770,7 +804,7 @@ proc setupLogging*(conf: CodexConf) =
of LogKind.None: of LogKind.None:
noOutput noOutput
when codex_enable_log_counter: when storage_enable_log_counter:
var counter = 0.uint64 var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) = proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter) inc(counter)
@ -781,15 +815,6 @@ proc setupLogging*(conf: CodexConf) =
else: else:
defaultChroniclesStream.outputs[0].writer = writer defaultChroniclesStream.outputs[0].writer = writer
try:
updateLogLevel(conf.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
proc setupMetrics*(config: CodexConf) = proc setupMetrics*(config: CodexConf) =
if config.metricsEnabled: if config.metricsEnabled:
let metricsAddress = config.metricsAddress let metricsAddress = config.metricsAddress

View File

@ -0,0 +1,8 @@
const ContentIdsExts = [
multiCodec("codex-root"),
multiCodec("codex-manifest"),
multiCodec("codex-block"),
multiCodec("codex-slot-root"),
multiCodec("codex-proving-root"),
multiCodec("codex-slot-cell"),
]

View File

@ -1,13 +1,13 @@
Codex Contracts in Nim Logos Storage Contracts in Nim
======================= =======================
Nim API for the [Codex smart contracts][1]. Nim API for the [Logos Storage smart contracts][1].
Usage Usage
----- -----
For a global overview of the steps involved in starting and fulfilling a For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1]. storage contract, see [Logos Storage Contracts][1].
Smart contract Smart contract
-------------- --------------
@ -144,5 +144,5 @@ await storage
.markProofAsMissing(id, period) .markProofAsMissing(id, period)
``` ```
[1]: https://github.com/status-im/codex-contracts-eth/ [1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md [2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md

View File

@ -1,3 +1,5 @@
{.push raises: [].}
import std/times import std/times
import pkg/ethers import pkg/ethers
import pkg/questionable import pkg/questionable
@ -72,7 +74,9 @@ method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()" doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset) return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} = method waitUntil*(
clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
while (let difference = time - clock.now(); difference > 0): while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear() clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference)) discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

View File

@ -18,9 +18,12 @@ const knownAddresses = {
# Taiko Alpha-3 Testnet # Taiko Alpha-3 Testnet
"167005": "167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable, {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Jun 11 2025 17:04:56 PM (+00:00 UTC) # Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
"789987": "789987":
{"Marketplace": Address.init("0xd53a4181862f42641ccA02Fb4CED7D7f19C6920B")}.toTable, {"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
# Linea (Status)
"1660990954":
{"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable }.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address = proc getKnownAddress(T: type, chainId: UInt256): ?Address =

View File

@ -1,7 +1,6 @@
import std/strformat import std/strformat
import std/strutils import std/strutils
import pkg/ethers import pkg/ethers
import pkg/upraises
import pkg/questionable import pkg/questionable
import pkg/lrucache import pkg/lrucache
import ../utils/exceptions import ../utils/exceptions
@ -279,9 +278,10 @@ method fillSlot(
# happen to be the last one to fill a slot in this request # happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot" trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof) let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100) let gasLimit = (gas * 110) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling fillSlot on contract" trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard await market.contract discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides) .fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1) .confirm(1)
@ -303,12 +303,15 @@ method freeSlot*(
# the SP's address as the collateral recipient # the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner() let collateralRecipient = await market.getSigner()
# Add 10% to gas estimate to deal with different evm code flow when we # Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail # happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot( let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient slotId, rewardRecipient, collateralRecipient
) )
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100) let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot( freeSlot = market.contract.freeSlot(
slotId, slotId,
@ -320,10 +323,13 @@ method freeSlot*(
# Otherwise, use the SP's address as both the reward and collateral # Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both) # recipient (the contract will use msg.sender for both)
# Add 10% to gas estimate to deal with different evm code flow when we # Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail # happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId) let gas = await market.contract.estimateGas.freeSlot(slotId)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100) let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(slotId, overrides) freeSlot = market.contract.freeSlot(slotId, overrides)
@ -377,10 +383,14 @@ method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} = ) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"): convertEthersError("Failed to mark proof as missing"):
# Add 10% to gas estimate to deal with different evm code flow when we # Add 50% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail # happen to be the one to make the request fail
let gas = await market.contract.estimateGas.markProofAsMissing(id, period) let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100) let gasLimit = (gas * 150) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling markProofAsMissing on contract",
estimatedGas = gas, gasLimit = gasLimit
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1) discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
@ -400,10 +410,13 @@ method reserveSlot*(
) {.async: (raises: [CancelledError, MarketError]).} = ) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"): convertEthersError("Failed to reserve slot"):
try: try:
# Add 10% to gas estimate to deal with different evm code flow when we # Add 25% to gas estimate to deal with different evm code flow when we
# happen to be the last one that is allowed to reserve the slot # happen to be the last one that is allowed to reserve the slot
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex) let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100) let gasLimit = (gas * 125) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard discard
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1) await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
@ -422,7 +435,7 @@ method canReserveSlot*(
method subscribeRequests*( method subscribeRequests*(
market: OnChainMarket, callback: OnRequest market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} = proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg error "There was an error in Request subscription", msg = eventErr.msg
return return
@ -436,7 +449,7 @@ method subscribeRequests*(
method subscribeSlotFilled*( method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} = proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg error "There was an error in SlotFilled subscription", msg = eventErr.msg
return return
@ -463,7 +476,7 @@ method subscribeSlotFilled*(
method subscribeSlotFreed*( method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} = proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg error "There was an error in SlotFreed subscription", msg = eventErr.msg
return return
@ -477,7 +490,7 @@ method subscribeSlotFreed*(
method subscribeSlotReservationsFull*( method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} = proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription", error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg msg = eventErr.msg
@ -492,7 +505,7 @@ method subscribeSlotReservationsFull*(
method subscribeFulfillment( method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return return
@ -506,7 +519,7 @@ method subscribeFulfillment(
method subscribeFulfillment( method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return return
@ -521,7 +534,7 @@ method subscribeFulfillment(
method subscribeRequestCancelled*( method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return return
@ -535,7 +548,7 @@ method subscribeRequestCancelled*(
method subscribeRequestCancelled*( method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return return
@ -550,7 +563,7 @@ method subscribeRequestCancelled*(
method subscribeRequestFailed*( method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg error "There was an error in RequestFailed subscription", msg = eventErr.msg
return return
@ -564,7 +577,7 @@ method subscribeRequestFailed*(
method subscribeRequestFailed*( method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg error "There was an error in RequestFailed subscription", msg = eventErr.msg
return return
@ -579,7 +592,7 @@ method subscribeRequestFailed*(
method subscribeProofSubmission*( method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} = ): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} = proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
without event =? eventResult, eventErr: without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return return

View File

@ -2,7 +2,7 @@ import std/hashes
import std/sequtils import std/sequtils
import std/typetraits import std/typetraits
import pkg/contractabi import pkg/contractabi
import pkg/nimcrypto import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields import pkg/ethers/contracts/fields
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/byteutils import pkg/stew/byteutils

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,13 +10,13 @@
{.push raises: [].} {.push raises: [].}
import std/algorithm import std/algorithm
import std/net
import std/sequtils import std/sequtils
import pkg/chronos import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope] import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/shims/net
import pkg/contractabi/address as ca import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5] import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256 from pkg/nimcrypto import keccak256
@ -43,6 +43,7 @@ type Discovery* = ref object of RootObj
# record to advertice node connection information, this carry any # record to advertice node connection information, this carry any
# address that the node can be connected on # address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information
isStarted: bool
proc toNodeId*(cid: Cid): NodeId = proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id ## Cid to discovery id
@ -157,7 +158,7 @@ method provide*(
method removeProvider*( method removeProvider*(
d: Discovery, peerId: PeerId d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} = ): Future[void] {.base, async: (raises: [CancelledError]).} =
## Remove provider from providers table ## Remove provider from providers table
## ##
@ -203,10 +204,15 @@ proc start*(d: Discovery) {.async: (raises: []).} =
try: try:
d.protocol.open() d.protocol.open()
await d.protocol.start() await d.protocol.start()
d.isStarted = true
except CatchableError as exc: except CatchableError as exc:
error "Error starting discovery", exc = exc.msg error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async: (raises: []).} = proc stop*(d: Discovery) {.async: (raises: []).} =
if not d.isStarted:
warn "Discovery not started, skipping stop"
return
try: try:
await noCancel d.protocol.closeWait() await noCancel d.protocol.closeWait()
except CatchableError as exc: except CatchableError as exc:

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import ../stores import ../stores

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import std/[sugar, atomics, sequtils] import std/[sugar, atomics, sequtils]
@ -25,6 +22,7 @@ import ../logutils
import ../manifest import ../manifest
import ../merkletree import ../merkletree
import ../stores import ../stores
import ../clock
import ../blocktype as bt import ../blocktype as bt
import ../utils import ../utils
import ../utils/asynciter import ../utils/asynciter
@ -120,19 +118,22 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
(idx - step) div steps (idx - step) div steps
proc getPendingBlocks( proc getPendingBlocks(
self: Erasure, manifest: Manifest, indicies: seq[int] self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] = ): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator ## Get pending blocks iterator
## ##
var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]
var proc attachIndex(
fut: Future[?!bt.Block], i: int
): Future[(?!bt.Block, int)] {.async.} =
## avoids closure capture issues
return (await fut, i)
for blockIndex in indices:
# request blocks from the store # request blocks from the store
pendingBlocks = indicies.map( let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
(i: int) => pendingBlocks.add(attachIndex(fut, blockIndex))
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
(r: ?!bt.Block) => (r, i)
) # Get the data blocks (first K)
)
proc isFinished(): bool = proc isFinished(): bool =
pendingBlocks.len == 0 pendingBlocks.len == 0
@ -168,16 +169,16 @@ proc prepareEncodingData(
strategy = params.strategy.init( strategy = params.strategy.init(
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
) )
indicies = toSeq(strategy.getIndicies(step)) indices = toSeq(strategy.getIndices(step))
pendingBlocksIter = pendingBlocksIter =
self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount)) self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))
var resolved = 0 var resolved = 0
for fut in pendingBlocksIter: for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err: without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue return failure(err)
let pos = indexToPos(params.steps, idx, step) let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data) shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
@ -185,7 +186,7 @@ proc prepareEncodingData(
resolved.inc() resolved.inc()
for idx in indicies.filterIt(it >= manifest.blocksCount): for idx in indices.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step) let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock) shallowCopy(data[pos], emptyBlock)
@ -218,8 +219,8 @@ proc prepareDecodingData(
strategy = encoded.protectedStrategy.init( strategy = encoded.protectedStrategy.init(
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
) )
indicies = toSeq(strategy.getIndicies(step)) indices = toSeq(strategy.getIndices(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies) pendingBlocksIter = self.getPendingBlocks(encoded, indices)
var var
dataPieces = 0 dataPieces = 0
@ -233,7 +234,7 @@ proc prepareDecodingData(
let (blkOrErr, idx) = await fut let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err: without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue continue
let pos = indexToPos(encoded.steps, idx, step) let pos = indexToPos(encoded.steps, idx, step)
@ -352,7 +353,7 @@ proc asyncEncode*(
return failure(joinErr) return failure(joinErr)
if not task.success.load(): if not task.success.load():
return failure("Leopard encoding failed") return failure("Leopard encoding task failed")
success() success()
@ -382,6 +383,8 @@ proc encodeData(
var var
data = seq[seq[byte]].new() # number of blocks to encode data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int) parity = createDoubleArray(params.ecM, manifest.blockSize.int)
defer:
freeDoubleArray(parity, params.ecM)
data[].setLen(params.ecK) data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow # TODO: this is a tight blocking loop so we sleep here to allow
@ -406,8 +409,6 @@ proc encodeData(
return failure(err) return failure(err)
except CancelledError as exc: except CancelledError as exc:
raise exc raise exc
finally:
freeDoubleArray(parity, params.ecM)
var idx = params.rounded + step var idx = params.rounded + step
for j in 0 ..< params.ecM: for j in 0 ..< params.ecM:
@ -544,17 +545,13 @@ proc asyncDecode*(
return failure(joinErr) return failure(joinErr)
if not task.success.load(): if not task.success.load():
return failure("Leopard encoding failed") return failure("Leopard decoding task failed")
success() success()
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = proc decodeInternal(
## Decode a protected manifest into it's original self: Erasure, encoded: Manifest
## manifest ): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
logScope: logScope:
steps = encoded.steps steps = encoded.steps
rounded_blocks = encoded.rounded rounded_blocks = encoded.rounded
@ -578,6 +575,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
data = seq[seq[byte]].new() data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new() parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int) recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
defer:
freeDoubleArray(recovered, encoded.ecK)
data[].setLen(encoded.ecK) # set len to K data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M parityData[].setLen(encoded.ecM) # set len to M
@ -604,8 +603,6 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return failure(err) return failure(err)
except CancelledError as exc: except CancelledError as exc:
raise exc raise exc
finally:
freeDoubleArray(recovered, encoded.ecK)
for i in 0 ..< encoded.ecK: for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step let idx = i * encoded.steps + step
@ -623,6 +620,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
warn "Unable to store block!", cid = blk.cid, msg = error.msg warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!") return failure("Unable to store block!")
self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)
cids[idx] = blk.cid cids[idx] = blk.cid
recoveredIndices.add(idx) recoveredIndices.add(idx)
except CancelledError as exc: except CancelledError as exc:
@ -634,6 +633,19 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
finally: finally:
decoder.release() decoder.release()
return (cids, recoveredIndices).success
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into it's original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err: without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err) return failure(err)
@ -655,6 +667,44 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return decoded.success return decoded.success
proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
## Repair a protected manifest by reconstructing the full dataset
##
## `encoded` - the encoded (protected) manifest to
## be repaired
##
without (cids, _) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
without repaired =? (
await self.encode(
Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
)
), err:
return failure(err)
if repaired.treeCid != encoded.treeCid:
return failure(
"Original tree root differs from the repaired tree root encoded out of recovered data"
)
return success()
proc start*(self: Erasure) {.async.} = proc start*(self: Erasure) {.async.} =
return return

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -24,13 +24,17 @@ type
IndexingError* = object of CodexError IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError IndexingWrongIterationsError* = object of IndexingError
IndexingWrongGroupCountError* = object of IndexingError
IndexingWrongPadBlockCountError* = object of IndexingError
IndexingStrategy* = object IndexingStrategy* = object
strategyType*: StrategyType strategyType*: StrategyType # Indexing strategy algorithm
firstIndex*: int # Lowest index that can be returned firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations iterations*: int # Number of iteration steps (0 ..< iterations)
step*: int step*: int # Step size between generated indices
groupCount*: int # Number of groups to partition indices into
padBlockCount*: int # Number of padding blocks to append per group
func checkIteration( func checkIteration(
self: IndexingStrategy, iteration: int self: IndexingStrategy, iteration: int
@ -44,39 +48,47 @@ func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}: {.cast(noSideEffect).}:
Iter[int].new(first, last, step) Iter[int].new(first, last, step)
func getLinearIndicies( func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let let
first = self.firstIndex + iteration * self.step first = self.firstIndex + iteration * self.step
last = min(first + self.step - 1, self.lastIndex) last = min(first + self.step - 1, self.lastIndex)
getIter(first, last, 1) getIter(first, last, 1)
func getSteppedIndicies( func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let let
first = self.firstIndex + iteration first = self.firstIndex + iteration
last = self.lastIndex last = self.lastIndex
getIter(first, last, self.iterations) getIter(first, last, self.iterations)
func getIndicies*( func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
case self.strategyType case self.strategyType
of StrategyType.LinearStrategy: of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration) self.getLinearIndices(iteration)
of StrategyType.SteppedStrategy: of StrategyType.SteppedStrategy:
self.getSteppedIndicies(iteration) self.getSteppedIndices(iteration)
func getIndices*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
{.cast(noSideEffect).}:
Iter[int].new(
iterator (): int {.gcsafe.} =
for value in self.getStrategyIndices(iteration):
yield value
for i in 0 ..< self.padBlockCount:
yield self.lastIndex + (iteration + 1) + i * self.groupCount
)
func init*( func init*(
strategy: StrategyType, firstIndex, lastIndex, iterations: int strategy: StrategyType,
firstIndex, lastIndex, iterations: int,
groupCount = 0,
padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} = ): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex: if firstIndex > lastIndex:
raise newException( raise newException(
@ -91,10 +103,24 @@ func init*(
"iterations (" & $iterations & ") must be greater than zero.", "iterations (" & $iterations & ") must be greater than zero.",
) )
if padBlockCount < 0:
raise newException(
IndexingWrongPadBlockCountError,
"padBlockCount (" & $padBlockCount & ") must be equal or greater than zero.",
)
if padBlockCount > 0 and groupCount <= 0:
raise newException(
IndexingWrongGroupCountError,
"groupCount (" & $groupCount & ") must be greater than zero.",
)
IndexingStrategy( IndexingStrategy(
strategyType: strategy, strategyType: strategy,
firstIndex: firstIndex, firstIndex: firstIndex,
lastIndex: lastIndex, lastIndex: lastIndex,
iterations: iterations, iterations: iterations,
step: divUp((lastIndex - firstIndex + 1), iterations), step: divUp((lastIndex - firstIndex + 1), iterations),
groupCount: groupCount,
padBlockCount: padBlockCount,
) )

View File

@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase ## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types ## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent ## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467) ## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
## ##
## When declaring a new type, one should consider importing the `codex/logutils` ## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output ## module, and specifying `formatIt`. If textlines log output and json log output

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,11 +9,9 @@
# This module implements serialization and deserialization of Manifest # This module implements serialization and deserialization of Manifest
import pkg/upraises
import times import times
push: {.push raises: [].}
{.upraises: [].}
import std/tables import std/tables
import std/sequtils import std/sequtils

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# This module defines all operations on Manifest # This module defines all operations on Manifest
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p/protobuf/minprotobuf import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec] import pkg/libp2p/[cid, multihash, multicodec]

View File

@ -1,5 +1,4 @@
import pkg/chronos import pkg/chronos
import pkg/upraises
import pkg/questionable import pkg/questionable
import pkg/ethers/erc20 import pkg/ethers/erc20
import ./contracts/requests import ./contracts/requests
@ -23,15 +22,15 @@ type
ProofInvalidError* = object of MarketError ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj Subscription* = ref object of RootObj
OnRequest* = OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotReservationsFull* = OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].} OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
ProofChallenge* = array[32, byte] ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction # Marketplace events -- located here due to the Market abstraction
@ -275,7 +274,7 @@ method subscribeProofSubmission*(
): Future[Subscription] {.base, async.} = ): Future[Subscription] {.base, async.} =
raiseAssert("not implemented") raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} = method unsubscribe*(subscription: Subscription) {.base, async.} =
raiseAssert("not implemented") raiseAssert("not implemented")
method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*(

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p import pkg/libp2p
import pkg/questionable import pkg/questionable

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -47,28 +47,6 @@ type
CodexProof* = ref object of ByteProof CodexProof* = ref object of ByteProof
mcodec*: MultiCodec mcodec*: MultiCodec
# CodeHashes is not exported from libp2p
# So we need to recreate it instead of
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Number of leaves
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof = func getProof*(self: CodexTree, index: int): ?!CodexProof =
var proof = CodexProof(mcodec: self.mcodec) var proof = CodexProof(mcodec: self.mcodec)
@ -128,17 +106,12 @@ proc `$`*(self: CodexProof): string =
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " & "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )" $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash = func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
## Compress two hashes ## Compress two hashes
## ##
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/codex-storage/nim-codex/issues/1162
let input = @x & @y & @[key.byte] let input = @x & @y & @[key.byte]
var digest = hashes.sha256.hash(input) let digest = ?MultiHash.digest(codec, input).mapFailure
success digest.digestBytes
success @digest
func init*( func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash] _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
@ -147,12 +120,12 @@ func init*(
return failure "Empty leaves" return failure "Empty leaves"
let let
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash) compress(x, y, key, mcodec)
Zero: ByteHash = newSeq[byte](mhash.size) digestSize = ?mcodec.digestSize.mapFailure
Zero: ByteHash = newSeq[byte](digestSize)
if mhash.size != leaves[0].len: if digestSize != leaves[0].len:
return failure "Invalid hash length" return failure "Invalid hash length"
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero) var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
@ -190,12 +163,12 @@ proc fromNodes*(
return failure "Empty nodes" return failure "Empty nodes"
let let
mhash = ?mcodec.mhash() digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](mhash.size) Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash) compress(x, y, key, mcodec)
if mhash.size != nodes[0].len: if digestSize != nodes[0].len:
return failure "Invalid hash length" return failure "Invalid hash length"
var var
@ -228,10 +201,10 @@ func init*(
return failure "Empty nodes" return failure "Empty nodes"
let let
mhash = ?mcodec.mhash() digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](mhash.size) Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} = compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash) compress(x, y, key, mcodec)
success CodexProof( success CodexProof(
compress: compressor, compress: compressor,

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

11
codex/multicodec_exts.nim Normal file
View File

@ -0,0 +1,11 @@
const CodecExts = [
("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress]
("codex-manifest", 0xCD01),
("codex-block", 0xCD02),
("codex-root", 0xCD03),
("codex-slot-root", 0xCD04),
("codex-proving-root", 0xCD05),
("codex-slot-cell", 0xCD06),
]

40
codex/multihash_exts.nim Normal file
View File

@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2
proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/logos-storage/logos-storage-nim/issues/1162
if len(output) > 0:
let digest = hashes.sha256.hash(data)
copyMem(addr output[0], addr digest[0], 32)
proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.Sponge.digest(data).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
const Sha2256MultiHash* = MHash(
mcodec: multiCodec("sha2-256"),
size: sha256.sizeDigest,
coder: sha2_256hash_constantine,
)
const HashExts = [
# override sha2-256 hash function
Sha2256MultiHash,
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
size: 32,
coder: poseidon2_sponge_rate2,
),
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
size: 32,
coder: poseidon2_merkle_2kb_sponge,
),
]

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -10,10 +10,10 @@
import import
std/[options, os, strutils, times, net, atomics], std/[options, os, strutils, times, net, atomics],
stew/shims/net as stewNet, stew/[objects],
stew/[objects, results],
nat_traversal/[miniupnpc, natpmp], nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net json_serialization/std/net,
results
import pkg/chronos import pkg/chronos
import pkg/chronicles import pkg/chronicles

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -44,7 +44,7 @@ import ./indexingstrategy
import ./utils import ./utils
import ./errors import ./errors
import ./logutils import ./logutils
import ./utils/asynciter import ./utils/safeasynciter
import ./utils/trackedfutures import ./utils/trackedfutures
export logutils export logutils
@ -52,7 +52,10 @@ export logutils
logScope: logScope:
topics = "codex node" topics = "codex node"
const DefaultFetchBatch = 10 const
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type type
Contracts* = Contracts* =
@ -78,9 +81,9 @@ type
CodexNodeRef* = ref CodexNode CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].} OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {. BatchProc* =
gcsafe, async: (raises: [CancelledError]) proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
.} OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: CodexNodeRef): Switch = func switch*(self: CodexNodeRef): Switch =
return self.switch return self.switch
@ -186,34 +189,62 @@ proc fetchBatched*(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i)) # (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# ) # )
while not iter.finished: # Sliding window: maintain batchSize blocks in-flight
let blockFutures = collect: let
for i in 0 ..< batchSize: refillThreshold = int(float(batchSize) * BatchRefillThreshold)
if not iter.finished: refillSize = max(refillThreshold, 1)
let address = BlockAddress.init(cid, iter.next()) maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
if blockFutures.len == 0: var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
var blockResults = await self.networkStore.getBlocks(addresses)
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue continue
without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err: inc(successfulBlocks)
trace "Some blocks failed to fetch", err = err.msg inc(completedInWindow)
return failure(err)
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value) if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
let numOfFailedBlocks = blockResults.len - blocks.len if completedInWindow >= refillThreshold and not iter.finished:
if numOfFailedBlocks > 0: var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
return for i in 0 ..< refillSize:
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")") if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption: if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr) return failure(batchErr)
if not iter.finished:
await sleepAsync(1.millis)
success() success()
proc fetchBatched*( proc fetchBatched*(
@ -403,6 +434,7 @@ proc store*(
filename: ?string = string.none, filename: ?string = string.none,
mimetype: ?string = string.none, mimetype: ?string = string.none,
blockSize = DefaultBlockSize, blockSize = DefaultBlockSize,
onBlockStored: OnBlockStoredProc = nil,
): Future[?!Cid] {.async.} = ): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize ## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest ## to nodes's BlockStore, and return Cid of its manifest
@ -432,6 +464,9 @@ proc store*(
if err =? (await self.networkStore.putBlock(blk)).errorOption: if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}") return failure(&"Unable to store block {blk.cid}")
if not onBlockStored.isNil:
onBlockStored(chunk)
except CancelledError as exc: except CancelledError as exc:
raise exc raise exc
except CatchableError as exc: except CatchableError as exc:
@ -639,10 +674,6 @@ proc onStore(
trace "Received a request to store a slot" trace "Received a request to store a slot"
# TODO: Use the isRepairing to manage the slot download.
# If isRepairing is true, the slot has to be repaired before
# being downloaded.
without manifest =? (await self.fetchManifest(cid)), err: without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err) return failure(err)
@ -675,32 +706,45 @@ proc onStore(
return success() return success()
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if slotIdx > int.high.uint64: if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx error "Cannot cast slot index to int", slotIndex = slotIdx
return return
without blksIter =? indexer.getIndicies(slotIdx.int).catch, err: if isRepairing:
trace "Unable to get indicies from strategy", err = err.msg trace "start repairing slot", slotIdx
return failure(err) try:
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
if err =? (await erasure.repair(manifest)).errorOption:
error "Unable to erasure decode repairing manifest",
cid = manifest.treeCid, exc = err.msg
return failure(err)
except CatchableError as exc:
error "Error erasure decoding repairing manifest",
cid = manifest.treeCid, exc = exc.msg
return failure(exc.msg)
else:
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if err =? ( without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry) trace "Unable to get indices from strategy", err = err.msg
).errorOption: return failure(err)
trace "Unable to fetch blocks", err = err.msg
return failure(err) if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without slotRoot =? (await builder.buildSlot(slotIdx.int)), err: without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
trace "Unable to build slot", err = err.msg trace "Unable to build slot", err = err.msg
return failure(err) return failure(err)
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]: if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch", trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
@ -837,14 +881,11 @@ proc start*(self: CodexNodeRef) {.async.} =
self.contracts.validator = ValidatorInteractions.none self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} = proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node" trace "Stopping node"
if not self.taskpool.isNil:
self.taskpool.shutdown()
await self.trackedFutures.cancelTracked() await self.trackedFutures.cancelTracked()
if not self.engine.isNil: if not self.engine.isNil:
@ -865,6 +906,7 @@ proc stop*(self: CodexNodeRef) {.async.} =
if not self.clock.isNil: if not self.clock.isNil:
await self.clock.stop() await self.clock.stop()
proc close*(self: CodexNodeRef) {.async.} =
if not self.networkStore.isNil: if not self.networkStore.isNil:
await self.networkStore.close await self.networkStore.close
@ -891,3 +933,10 @@ proc new*(
contracts: contracts, contracts: contracts,
trackedFutures: TrackedFutures(), trackedFutures: TrackedFutures(),
) )
proc hasLocalBlock*(
self: CodexNodeRef, cid: Cid
): Future[bool] {.async: (raises: [CancelledError]).} =
## Returns true if the given Cid is present in the local store
return await (cid in self.networkStore.localStore)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import std/sequtils import std/sequtils
import std/mimetypes import std/mimetypes
@ -183,7 +180,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) = proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion
router.api(MethodOptions, "/api/codex/v1/data") do( router.api(MethodOptions, "/api/storage/v1/data") do(
resp: HttpResponseRef resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
if corsOrigin =? allowedOrigin: if corsOrigin =? allowedOrigin:
@ -195,7 +192,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse: router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner ## Upload a file in a streaming manner
## ##
@ -257,11 +254,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
finally: finally:
await reader.closeWait() await reader.closeWait()
router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse:
let json = await formatManifestBlocks(node) let json = await formatManifestBlocks(node)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodOptions, "/api/codex/v1/data/{cid}") do( router.api(MethodOptions, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
if corsOrigin =? allowedOrigin: if corsOrigin =? allowedOrigin:
@ -270,7 +267,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.api(MethodGet, "/api/codex/v1/data/{cid}") do( router.api(MethodGet, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -286,7 +283,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
await node.retrieveCid(cid.get(), local = true, resp = resp) await node.retrieveCid(cid.get(), local = true, resp = resp)
router.api(MethodDelete, "/api/codex/v1/data/{cid}") do( router.api(MethodDelete, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Deletes either a single block or an entire dataset ## Deletes either a single block or an entire dataset
@ -307,7 +304,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Download a file from the network to the local node ## Download a file from the network to the local node
@ -328,7 +325,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest) let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do( router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Download a file from the network in a streaming ## Download a file from the network in a streaming
@ -347,7 +344,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition") resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp) await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do(
cid: Cid, resp: HttpResponseRef cid: Cid, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
## Download only the manifest. ## Download only the manifest.
@ -365,7 +362,23 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest) let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Only test if the give CID is available in the local store
##
var headers = buildCorsHeaders("GET", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
let cid = cid.get()
let hasCid = await node.hasLocalBlock(cid)
let json = %*{$cid: hasCid}
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
let json = let json =
%RestRepoStore( %RestRepoStore(
totalBlocks: repoStore.totalBlocks, totalBlocks: repoStore.totalBlocks,
@ -378,7 +391,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
## Returns active slots for the host ## Returns active slots for the host
@ -396,7 +409,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do( router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
slotId: SlotId slotId: SlotId
) -> RestApiResponse: ) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the ## Returns active slot with id {slotId} for the host. Returns 404 if the
@ -426,7 +439,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
restAgent.toJson, contentType = "application/json", headers = headers restAgent.toJson, contentType = "application/json", headers = headers
) )
router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale ## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -448,7 +461,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse: router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell. ## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is ## Every time Availability's offer finishes, its capacity is
## returned to the availability. ## returned to the availability.
@ -528,7 +541,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do( router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId, resp: HttpResponseRef id: AvailabilityId, resp: HttpResponseRef
) -> RestApiResponse: ) -> RestApiResponse:
if corsOrigin =? allowedOrigin: if corsOrigin =? allowedOrigin:
@ -537,7 +550,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
resp.status = Http204 resp.status = Http204
await resp.sendBody("") await resp.sendBody("")
router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do( router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId id: AvailabilityId
) -> RestApiResponse: ) -> RestApiResponse:
## Updates Availability. ## Updates Availability.
@ -625,7 +638,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500) return RestApiResponse.error(Http500)
router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do( router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
id: AvailabilityId id: AvailabilityId
) -> RestApiResponse: ) -> RestApiResponse:
## Gets Availability's reservations. ## Gets Availability's reservations.
@ -669,7 +682,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin let allowedOrigin = router.allowedOrigin
router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do( router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
cid: Cid cid: Cid
) -> RestApiResponse: ) -> RestApiResponse:
var headers = buildCorsHeaders("POST", allowedOrigin) var headers = buildCorsHeaders("POST", allowedOrigin)
@ -779,7 +792,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do( router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
id: PurchaseId id: PurchaseId
) -> RestApiResponse: ) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -811,7 +824,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
try: try:
@ -833,7 +846,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
## various node management api's ## various node management api's
## ##
router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse:
## Returns node SPR in requested format, json or text. ## Returns node SPR in requested format, json or text.
## ##
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -856,7 +869,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse:
## Returns node's peerId in requested format, json or text. ## Returns node's peerId in requested format, json or text.
## ##
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -875,7 +888,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do( router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do(
peerId: PeerId, addrs: seq[MultiAddress] peerId: PeerId, addrs: seq[MultiAddress]
) -> RestApiResponse: ) -> RestApiResponse:
## Connect to a peer ## Connect to a peer
@ -913,7 +926,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse: router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse:
## Print rudimentary node information ## Print rudimentary node information
## ##
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)
@ -933,7 +946,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
"", "",
"announceAddresses": node.discovery.announceAddrs, "announceAddresses": node.discovery.announceAddrs,
"table": table, "table": table,
"codex": { "storage": {
"version": $codexVersion, "version": $codexVersion,
"revision": $codexRevision, "revision": $codexRevision,
"contracts": $codexContractsRevision, "contracts": $codexContractsRevision,
@ -948,7 +961,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do( router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do(
level: Option[string] level: Option[string]
) -> RestApiResponse: ) -> RestApiResponse:
## Set log level at run time ## Set log level at run time
@ -974,8 +987,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers) return RestApiResponse.error(Http500, headers = headers)
when codex_enable_api_debug_peers: when storage_enable_api_debug_peers:
router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do( router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do(
peerId: PeerId peerId: PeerId
) -> RestApiResponse: ) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin) var headers = buildCorsHeaders("GET", allowedOrigin)

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/libp2p/crypto/crypto import pkg/libp2p/crypto/crypto
import pkg/bearssl/rand import pkg/bearssl/rand

View File

@ -22,7 +22,7 @@ import ./utils/exceptions
## Sales holds a list of available storage that it may sell. ## Sales holds a list of available storage that it may sell.
## ##
## When storage is requested on the market that matches availability, the Sales ## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the ## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an ## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract. ## attempt to win a storage contract.
## ##

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -27,9 +27,7 @@
## | UInt256 | totalRemainingCollateral | | ## | UInt256 | totalRemainingCollateral | |
## +---------------------------------------------------+ ## +---------------------------------------------------+
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import std/sequtils import std/sequtils
import std/sugar import std/sugar
@ -38,7 +36,6 @@ import std/sequtils
import std/times import std/times
import pkg/chronos import pkg/chronos
import pkg/datastore import pkg/datastore
import pkg/nimcrypto
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stint import pkg/stint
@ -55,6 +52,8 @@ import ../units
export requests export requests
export logutils export logutils
from nimcrypto import randomBytes
logScope: logScope:
topics = "marketplace sales reservations" topics = "marketplace sales reservations"
@ -92,14 +91,10 @@ type
repo: RepoStore repo: RepoStore
OnAvailabilitySaved: ?OnAvailabilitySaved OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {. GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
upraises: [], gcsafe, async: (raises: [CancelledError]), closure IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
.} OnAvailabilitySaved* =
IterDispose* = proc(availability: Availability): Future[void] {.async: (raises: []).}
proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
upraises: [], gcsafe, async: (raises: [])
.}
StorableIter* = ref object StorableIter* = ref object
finished*: bool finished*: bool
next*: GetNext next*: GetNext

View File

@ -2,7 +2,6 @@ import pkg/chronos
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stint import pkg/stint
import pkg/upraises
import ../contracts/requests import ../contracts/requests
import ../errors import ../errors
import ../logutils import ../logutils
@ -113,14 +112,12 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
method onFulfilled*( method onFulfilled*(
agent: SalesAgent, requestId: RequestId agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} = ) {.base, gcsafe, raises: [].} =
let cancelled = agent.data.cancelled let cancelled = agent.data.cancelled
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished: if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
cancelled.cancelSoon() cancelled.cancelSoon()
method onFailed*( method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
without request =? agent.data.request: without request =? agent.data.request:
return return
if agent.data.requestId == requestId: if agent.data.requestId == requestId:
@ -128,7 +125,7 @@ method onFailed*(
method onSlotFilled*( method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: uint64 agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, upraises: [].} = ) {.base, gcsafe, raises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex)) agent.schedule(slotFilledEvent(requestId, slotIndex))

View File

@ -1,6 +1,5 @@
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/upraises
import pkg/libp2p/cid import pkg/libp2p/cid
import ../market import ../market
@ -24,21 +23,20 @@ type
slotQueue*: SlotQueue slotQueue*: SlotQueue
simulateProofFailures*: int simulateProofFailures*: int
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {. BlocksCb* =
gcsafe, async: (raises: [CancelledError]) proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
.}
OnStore* = proc( OnStore* = proc(
request: StorageRequest, request: StorageRequest,
expiry: SecondsSince1970, expiry: SecondsSince1970,
slot: uint64, slot: uint64,
blocksCb: BlocksCb, blocksCb: BlocksCb,
isRepairing: bool, isRepairing: bool,
): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).} ): Future[?!void] {.async: (raises: [CancelledError]).}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, async: (raises: [CancelledError]) async: (raises: [CancelledError])
.} .}
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {. OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
gcsafe, async: (raises: [CancelledError]) async: (raises: [CancelledError])
.} .}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].} OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].} OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

View File

@ -15,8 +15,7 @@ logScope:
topics = "marketplace slotqueue" topics = "marketplace slotqueue"
type type
OnProcessSlot* = OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}
proc(item: SlotQueueItem): Future[void] {.gcsafe, async: (raises: []).}
# Non-ref obj copies value when assigned, preventing accidental modification # Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg # of values which could cause an incorrect order (eg

View File

@ -1,5 +1,4 @@
import pkg/questionable import pkg/questionable
import pkg/upraises
import ../errors import ../errors
import ../utils/asyncstatemachine import ../utils/asyncstatemachine
import ../market import ../market
@ -16,17 +15,17 @@ type
method onCancelled*( method onCancelled*(
state: SaleState, request: StorageRequest state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} = ): ?State {.base, raises: [].} =
discard discard
method onFailed*( method onFailed*(
state: SaleState, request: StorageRequest state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} = ): ?State {.base, raises: [].} =
discard discard
method onSlotFilled*( method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: uint64 state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, upraises: [].} = ): ?State {.base, raises: [].} =
discard discard
proc cancelledEvent*(request: StorageRequest): Event = proc cancelledEvent*(request: StorageRequest): Event =

View File

@ -1,6 +1,5 @@
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/upraises
import ../statemachine import ../statemachine
import ../salesagent import ../salesagent

View File

@ -11,7 +11,7 @@ import ./cancelled
import ./failed import ./failed
import ./proving import ./proving
when codex_enable_proof_failures: when storage_enable_proof_failures:
import ./provingsimulated import ./provingsimulated
logScope: logScope:
@ -59,7 +59,7 @@ method run*(
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err)) return some State(SaleErrored(error: err))
when codex_enable_proof_failures: when storage_enable_proof_failures:
if context.simulateProofFailures > 0: if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures info "Proving with failure rate", rate = context.simulateProofFailures
return some State( return some State(

View File

@ -61,7 +61,7 @@ method run*(
return some State(SaleIgnored(reprocessSlot: false)) return some State(SaleIgnored(reprocessSlot: false))
# TODO: Once implemented, check to ensure the host is allowed to fill the slot, # TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) # due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)
logScope: logScope:
slotIndex = data.slotIndex slotIndex = data.slotIndex

View File

@ -1,5 +1,5 @@
import ../../conf import ../../conf
when codex_enable_proof_failures: when storage_enable_proof_failures:
import std/strutils import std/strutils
import pkg/stint import pkg/stint
import pkg/ethers import pkg/ethers

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -113,17 +113,17 @@ func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural =
self.numBlockCells * self.numSlotBlocks self.numBlockCells * self.numSlotBlocks
func slotIndiciesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] = func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
## Returns the slot indices. ## Returns the slot indices.
## ##
self.strategy.getIndicies(slot).catch self.strategy.getIndices(slot).catch
func slotIndicies*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] = func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
## Returns the slot indices. ## Returns the slot indices.
## ##
if iter =? self.strategy.getIndicies(slot).catch: if iter =? self.strategy.getIndices(slot).catch:
return toSeq(iter) return toSeq(iter)
func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest = func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
@ -184,7 +184,7 @@ proc getCellHashes*[T, H](
slotIndex = slotIndex slotIndex = slotIndex
let hashes = collect(newSeq): let hashes = collect(newSeq):
for i, blkIdx in self.strategy.getIndicies(slotIndex): for i, blkIdx in self.strategy.getIndices(slotIndex):
logScope: logScope:
blkIdx = blkIdx blkIdx = blkIdx
pos = i pos = i
@ -310,7 +310,7 @@ proc new*[T, H](
_: type SlotsBuilder[T, H], _: type SlotsBuilder[T, H],
store: BlockStore, store: BlockStore,
manifest: Manifest, manifest: Manifest,
strategy = SteppedStrategy, strategy = LinearStrategy,
cellSize = DefaultCellSize, cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] = ): ?!SlotsBuilder[T, H] =
if not manifest.protected: if not manifest.protected:
@ -354,7 +354,14 @@ proc new*[T, H](
emptyBlock = newSeq[byte](manifest.blockSize.int) emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int) emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)
strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch strategy =
?strategy.init(
0,
manifest.blocksCount - 1,
manifest.numSlots,
manifest.numSlots,
numPadSlotBlocks,
).catch
logScope: logScope:
numSlotBlocks = numSlotBlocks numSlotBlocks = numSlotBlocks

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -53,7 +53,7 @@ proc getSample*[T, H](
cellsPerBlock = self.builder.numBlockCells cellsPerBlock = self.builder.numBlockCells
blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index
blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index
origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx] origBlockIdx = self.builder.slotIndices(self.index)[blkSlotIdx]
# convert to original dataset block index # convert to original dataset block index
logScope: logScope:

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -29,7 +29,7 @@ type
Block Block
Both Both
CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, async: (raises: []).} CidCallback* = proc(cid: Cid): Future[void] {.async: (raises: []).}
BlockStore* = ref object of RootObj BlockStore* = ref object of RootObj
onBlockStored*: ?CidCallback onBlockStored*: ?CidCallback
@ -65,6 +65,19 @@ method getBlock*(
raiseAssert("getBlock by addr not implemented!") raiseAssert("getBlock by addr not implemented!")
method completeBlock*(
self: BlockStore, address: BlockAddress, blk: Block
) {.base, gcsafe.} =
discard
method getBlocks*(
self: BlockStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
## Gets a set of blocks from the blockstore. Blocks might
## be returned in any order.
raiseAssert("getBlocks not implemented!")
method getBlockAndProof*( method getBlockAndProof*(
self: BlockStore, treeCid: Cid, index: Natural self: BlockStore, treeCid: Cid, index: Natural
): Future[?!(Block, CodexProof)] {.base, async: (raises: [CancelledError]), gcsafe.} = ): Future[?!(Block, CodexProof)] {.base, async: (raises: [CancelledError]), gcsafe.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -66,6 +66,21 @@ method getBlock*(
trace "Error requesting block from cache", cid, error = exc.msg trace "Error requesting block from cache", cid, error = exc.msg
return failure exc return failure exc
method getBlocks*(
self: CacheStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var i = 0
proc isFinished(): bool =
i == addresses.len
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
let value = await self.getBlock(addresses[i])
inc(i)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
method getCidAndProof*( method getCidAndProof*(
self: CacheStore, treeCid: Cid, index: Natural self: CacheStore, treeCid: Cid, index: Natural
): Future[?!(Cid, CodexProof)] {.async: (raises: [CancelledError]).} = ): Future[?!(Cid, CodexProof)] {.async: (raises: [CancelledError]).} =
@ -259,6 +274,9 @@ method delBlock*(
return success() return success()
method completeBlock*(self: CacheStore, address: BlockAddress, blk: Block) {.gcsafe.} =
discard
method close*(self: CacheStore): Future[void] {.async: (raises: []).} = method close*(self: CacheStore): Future[void] {.async: (raises: []).} =
## Close the blockstore, a no-op for this implementation ## Close the blockstore, a no-op for this implementation
## ##

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import std/sugar import std/sugar
import pkg/questionable/results import pkg/questionable/results

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH ## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -31,6 +31,31 @@ type NetworkStore* = ref object of BlockStore
engine*: BlockExcEngine # blockexc decision engine engine*: BlockExcEngine # blockexc decision engine
localStore*: BlockStore # local block store localStore*: BlockStore # local block store
method getBlocks*(
self: NetworkStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var
localAddresses: seq[BlockAddress]
remoteAddresses: seq[BlockAddress]
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for address in addresses:
if not (await address in self.localStore):
remoteAddresses.add(address)
else:
localAddresses.add(address)
if (Moment.now() - lastIdle) >= runtimeQuota:
await idleAsync()
lastIdle = Moment.now()
return chain(
await self.localStore.getBlocks(localAddresses),
self.engine.requestBlocks(remoteAddresses),
)
method getBlock*( method getBlock*(
self: NetworkStore, address: BlockAddress self: NetworkStore, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} = ): Future[?!Block] {.async: (raises: [CancelledError]).} =
@ -63,6 +88,9 @@ method getBlock*(
self.getBlock(BlockAddress.init(treeCid, index)) self.getBlock(BlockAddress.init(treeCid, index))
method completeBlock*(self: NetworkStore, address: BlockAddress, blk: Block) =
self.engine.completeBlock(address, blk)
method putBlock*( method putBlock*(
self: NetworkStore, blk: Block, ttl = Duration.none self: NetworkStore, blk: Block, ttl = Duration.none
): Future[?!void] {.async: (raises: [CancelledError]).} = ): Future[?!void] {.async: (raises: [CancelledError]).} =

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -38,6 +38,21 @@ logScope:
# BlockStore API # BlockStore API
########################################################### ###########################################################
method getBlocks*(
    self: RepoStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
  ## Return an async iterator over `addresses`, resolving each address in
  ## order with one `getBlock` call per item. Failures surface as the
  ## per-item `?!Block` result rather than stopping the iteration.
  var idx = 0

  proc finished(): bool =
    # Exhausted once every address has been consumed.
    idx >= addresses.len

  proc fetchNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
    let res = await self.getBlock(addresses[idx])
    inc(idx)
    return res

  return SafeAsyncIter[Block].new(fetchNext, finished)
method getBlock*( method getBlock*(
self: RepoStore, cid: Cid self: RepoStore, cid: Cid
): Future[?!Block] {.async: (raises: [CancelledError]).} = ): Future[?!Block] {.async: (raises: [CancelledError]).} =
@ -428,7 +443,6 @@ proc start*(
): Future[void] {.async: (raises: [CancelledError, CodexError]).} = ): Future[void] {.async: (raises: [CancelledError, CodexError]).} =
## Start repo ## Start repo
## ##
if self.started: if self.started:
trace "Repo already started" trace "Repo already started"
return return
@ -450,6 +464,5 @@ proc stop*(self: RepoStore): Future[void] {.async: (raises: []).} =
return return
trace "Stopping repo" trace "Stopping repo"
await self.close()
self.started = false self.started = false

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH ## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import std/sugar import std/sugar
import pkg/chronos import pkg/chronos

View File

@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/chronos import pkg/chronos
import pkg/libp2p import pkg/libp2p

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH ## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
import std/options import std/options
import pkg/upraises {.push raises: [], gcsafe.}
push:
{.upraises: [].}
import pkg/chronos import pkg/chronos
import pkg/stew/ptrops import pkg/stew/ptrops

View File

@ -1,9 +1,8 @@
import std/times import std/times
import pkg/upraises
import ./clock import ./clock
type SystemClock* = ref object of Clock type SystemClock* = ref object of Clock
method now*(clock: SystemClock): SecondsSince1970 {.upraises: [].} = method now*(clock: SystemClock): SecondsSince1970 {.raises: [].} =
let now = times.now().utc let now = times.now().utc
now.toTime().toUnix() now.toTime().toUnix()

Some files were not shown because too many files have changed in this diff Show More