Compare commits

...

52 Commits

Author SHA1 Message Date
Arnaud
60861d6af8
chore: rename codex to logos storage (#1359) 2025-12-18 17:23:09 +00:00
Eric
49e801803f
ci: remove dist tests and devnet deployment (#1338) 2025-12-17 06:03:59 +00:00
Jacek Sieka
858101c74c
chore: bump eth & networking (#1353) 2025-12-15 10:00:51 +00:00
Jacek Sieka
bd49591fff
chore: bump *-serialization (#1352) 2025-12-12 08:03:56 +00:00
Jacek Sieka
6765beee2c
chore: assorted bumps (#1351) 2025-12-11 21:03:36 +00:00
Jacek Sieka
45fec4b524
chore: bump libbacktrace (#1349) 2025-12-11 20:42:53 +00:00
Jacek Sieka
9ac9f6ff3c
chore: drop usage of upraises (#1348) 2025-12-11 09:03:43 +00:00
Arnaud
bd36032251
feat: add c binding (#1322)
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 07:34:09 +00:00
Chrysostomos Nanakos
be759baf4d
feat: Block exchange optimizations (#1325)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 05:47:02 +00:00
Eric
6147a751f1
fix(ci): Remove macos amd release build (#1337) 2025-11-13 05:37:43 +00:00
Eric
ee47ca8760
feat(libs): Use libp2p multiformats extensions instead of a rolling branch (#1329) 2025-11-13 04:48:33 +00:00
Eric
f791a960f2
fix(ci): Windows SIGILL in CI (#1339) 2025-11-03 11:45:02 +00:00
Arnaud
db8f866db4
feat: check if CID exists in local store (#1331) 2025-11-02 04:32:47 +00:00
Eric
7aca2f0e61
fix(ci): Move conventional commits job to workflow (#1340) 2025-11-02 04:00:55 +00:00
Eric
072bff5cab
fix: ci integration tests (#1335) 2025-10-30 19:38:11 +11:00
Arnaud
af55a761e6
chore: skip marketplace and long integration tests (#1326) 2025-10-22 19:22:33 +11:00
Adam Uhlíř
e3d8d195c3
chore: update nim-libp2p (#1323) 2025-10-01 13:19:15 +02:00
Slava
d1f2e2399b
ci: validate pr title to adhere conventional commits (#1254) 2025-08-12 08:51:41 +00:00
Slava
8cd10edb69
ci: auto deploy codex on devnet (#1302) 2025-07-28 10:02:19 +00:00
Slava
6cf99e255c
ci: release master builds and upload them to the cloud (#1298) 2025-07-10 11:17:11 +00:00
Dmitriy Ryajov
7eb2fb12cc
make default dirs runtime, not compile time. (#1292) 2025-06-26 18:44:24 +00:00
Slava
352273ff81
chore: bump codex-contracts-eth (#1293) 2025-06-26 18:09:48 +00:00
Slava
9ef9258720
chore(ci): bump node to v22 (#1285) 2025-06-26 01:11:00 +00:00
markspanbroek
7927afe715
chore: update nph dependency (#1279)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-25 10:30:48 +00:00
markspanbroek
01615354af
refactor(ci): run integration tests in parallel by spinning up more runners (#1287) 2025-06-25 08:56:16 +00:00
Chrysostomos Nanakos
baff902137
fix: resolve shared block request cancellation conflicts (#1284) 2025-06-24 15:05:25 +00:00
markspanbroek
4d44154a40
fix(ci): remove "update" to gcc-14 on windows (#1288) 2025-06-24 09:00:56 +00:00
markspanbroek
e1c397e112
fix(tests): auto import all tests files and fix forgotten tests (#1281) 2025-06-23 11:18:59 +00:00
Arnaud
7b660e3554
chore(marketplace): use hardhat ignition (#1195) 2025-06-20 15:55:00 +00:00
Arnaud
c5e424ff1b
feat(marketplace) - add status l2 (Linea) network (#1160) 2025-06-20 12:30:40 +00:00
Slava
36f64ad3e6
chore: update testnet marketplace address (#1283) 2025-06-20 06:13:58 +00:00
Ben Bierens
235c0ec842
chore: updates codex-contracts-eth submodule (#1278)
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2025-06-19 10:31:52 +00:00
Arnaud
d443df441d
chore: improve marketplace integration tests (#1268) 2025-06-19 06:36:10 +00:00
Arnaud
e35aec7870
chore: increase gas limits (#1272) 2025-06-18 12:18:56 +00:00
Slava
93e4e0f177
ci(docker): add stable tag for dist-tests images (#1273) 2025-06-16 16:22:09 +00:00
Slava
6db6bf5f72
feat(docker): adjust entrypoint (#1271)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-14 04:25:29 +00:00
Chrysostomos Nanakos
b305e00160
Add support for slot reconstruction on unavailable slot detection (#1235)
Co-authored-by: Arnaud <arnaud@status.im>
2025-06-12 22:19:42 +00:00
Slava
3d2d8273e6
chore: update testnet marketplace address (#1270) 2025-06-12 08:10:22 +00:00
Slava
e324ac8ca5
feat(docker): add codex network support for docker-entrypoint (#1262)
Co-authored-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
2025-06-11 14:02:39 +00:00
Adam Uhlíř
f267d99ea8
ci: docker stable tag (#1265) 2025-06-11 13:49:39 +00:00
Ben Bierens
8af73e02a9
bumps to latest nim-json-rpc (#1267) 2025-06-11 11:58:49 +00:00
markspanbroek
27d807a841
fix(sales): fix marketplace block expiry (#1258) 2025-06-11 11:27:09 +00:00
Ben Bierens
85823342e9
Improves logging in maintenance module and erasure. (#1264) 2025-06-10 13:27:52 +00:00
Ben Bierens
09a8419942
bumps codex-contracts-eth (#1261) 2025-06-10 09:18:04 +00:00
Adam Uhlíř
7502b9ad2c
feat(cirdl): auto-discovery of marketplace contract (#1259) 2025-06-09 10:04:15 +00:00
Arnaud
3e17207a0b
feat(marketplace) - add command line arg for maxPriorityFeePerGas (#1189) 2025-06-05 07:47:39 +00:00
Eric
1bea94c390
fix(tests): fetching past contract events test (#1255) 2025-06-04 20:36:09 -07:00
markspanbroek
ffbbee01b1
fix(purchasing): fix crash completing future more than once (#1249) 2025-06-04 14:15:07 +00:00
markspanbroek
2dd436bfb7
fix(sales): do not crash when retrieving request fails (#1248) 2025-06-04 11:22:14 +00:00
Arnaud
2e1306ac2d
chore: fix custom error handling when simulating invalid proofs (#1217)
* Fix custom error handling when simulating invalid proofs

* Update error message
2025-06-03 12:11:18 +00:00
Arnaud
45ade0e3c1
chore(marketplace): use canMarkProofAsMissing (#1188)
* Add canProofBeMarkedAsMissing

* Add more tests

* Update contracts submodule
2025-06-03 09:08:57 +00:00
Arnaud
ca869f6dce
fix(availabilities): use totalRemainingCollateral instead of totalCollateral for comparaison (#1229)
* Use totalRemainingCollateral instead of totalCollateral to compare the availability changes

* Update test to use totalRemainingCollateral instead of totalCollateral when testing OnAvailabilitySaved

* Reduce poll interval

* Fix flaky test

* Fix format
2025-06-02 16:47:12 +00:00
257 changed files with 7839 additions and 1878 deletions

View File

@@ -81,12 +81,6 @@ runs:
mingw-w64-i686-ntldd-git
mingw-w64-i686-rust
- name: MSYS2 (Windows All) - Update to gcc 14
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst
- name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
@@ -224,7 +218,7 @@ runs:
run: |
git config --global core.symlinks false
- name: Build Nim and Codex dependencies
- name: Build Nim and Logos Storage dependencies
shell: ${{ inputs.shell }} {0}
run: |
which gcc

View File

@@ -3,12 +3,14 @@ Tips for shorter build times
### Runner availability ###
Currently, the biggest bottleneck when optimizing workflows is the availability
of Windows and macOS runners. Therefore, anything that reduces the time spent in
Windows or macOS jobs will have a positive impact on the time waiting for
runners to become available. The usage limits for Github Actions are [described
here][limits]. You can see a breakdown of runner usage for your jobs in the
Github Actions tab ([example][usage]).
When running on the Github free, pro or team plan, the bottleneck when
optimizing workflows is the availability of macOS runners. Therefore, anything
that reduces the time spent in macOS jobs will have a positive impact on the
time waiting for runners to become available. On the Github enterprise plan,
this is not the case and you can more freely use parallelization on multiple
runners. The usage limits for Github Actions are [described here][limits]. You
can see a breakdown of runner usage for your jobs in the Github Actions tab
([example][usage]).
### Windows is slow ###
@@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.
Breaking up a long build job into several jobs that you run in parallel can have
a positive impact on the wall clock time that a workflow runs. For instance, you
might consider running unit tests and integration tests in parallel. Keep in
mind however that availability of macOS and Windows runners is the biggest
bottleneck. If you split a Windows job into two jobs, you now need to wait for
two Windows runners to become available! Therefore parallelization often only
makes sense for Linux jobs.
might consider running unit tests and integration tests in parallel. When
running on the Github free, pro or team plan, keep in mind that availability of
macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
need to wait for two macOS runners to become available.
### Refactoring ###
@@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
to know whether you introduced a failure on all platforms, or only on a single
one. You might be tempted to disable fail-fast, but keep in mind that this keeps
runners busy for longer on a workflow that you know is going to fail anyway.
Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
Subsequent runs will therefore take longer to start. Fail-fast is most likely
better for overall development speed.
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache

View File

@@ -24,9 +24,9 @@ jobs:
run:
shell: ${{ matrix.shell }} {0}
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
runs-on: ${{ matrix.builder }}
timeout-minutes: 120
timeout-minutes: 90
steps:
- name: Checkout sources
uses: actions/checkout@v4
@@ -49,18 +49,21 @@ jobs:
run: make -j${ncpu} test
- name: Setup Node.js
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
uses: actions/setup-node@v4
with:
node-version: 20
node-version: 22
- name: Start Ethereum node with Codex contracts
- name: Start Ethereum node with Logos Storage contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/codex-contracts-eth
working-directory: vendor/logos-storage-contracts-eth
env:
MSYS2_PATH_TYPE: inherit
run: |
npm install
npm ci
npm start &
# Wait for the contracts to be deployed
sleep 5
## Part 2 Tests ##
- name: Contract tests
@@ -70,13 +73,15 @@ jobs:
## Part 3 Tests ##
- name: Integration tests
if: matrix.tests == 'integration' || matrix.tests == 'all'
env:
CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
run: make -j${ncpu} testIntegration
- name: Upload integration tests log files
uses: actions/upload-artifact@v4
if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
with:
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
path: tests/integration/logs/
retention-days: 1

View File

@@ -16,29 +16,21 @@ concurrency:
cancel-in-progress: true
jobs:
matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
run: |
echo 'matrix<<EOF' >> $GITHUB_OUTPUT
tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
build:
needs: matrix
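Note that the inline matrix above is replaced by `tools/scripts/ci-job-matrix.sh`, whose contents are not part of this diff. Purely as a hedged sketch of the shape such a script might take (field names copied from the old inline matrix; the OS-filter argument matches the nightly workflow's `ci-job-matrix.sh linux` call further down; everything else is an assumption):

```bash
#!/usr/bin/env bash
# Hypothetical sketch of tools/scripts/ci-job-matrix.sh -- NOT the real script.
# Emits a JSON matrix for fromJSON(); an optional argument filters by OS.
set -euo pipefail

filter="${1:-}" # e.g. "linux", as used by the nightly workflow

matrix='{"include":[]}'
for os in linux macos windows; do
  if [[ -n "$filter" && "$filter" != "$os" ]]; then continue; fi
  for tests in unittest contract integration tools; do
    entry=$(jq -cn --arg os "$os" --arg tests "$tests" \
      '{os: $os, tests: $tests, nim_version: "pinned"}')
    matrix=$(jq -c --argjson e "$entry" '.include += [$e]' <<< "$matrix")
  done
done

echo "$matrix"
```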

View File

@@ -0,0 +1,19 @@
name: Conventional Commits Linting
on:
push:
branches:
- master
pull_request:
workflow_dispatch:
merge_group:
jobs:
pr-title:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/pr-conventional-commits@1.4.1
with:
task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'
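For reference, titles in the commit list above show what this check accepts: `feat: add c binding (#1322)` and `fix(ci): Windows SIGILL in CI (#1339)` would pass, while an untyped title like `make default dirs runtime, not compile time. (#1292)` (which predates this workflow, introduced in #1254) would be rejected.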

View File

@@ -1,54 +0,0 @@
name: Docker - Dist-Tests
on:
push:
branches:
- master
tags:
- 'v*.*.*'
paths-ignore:
- '**/*.md'
- '.gitignore'
- '.github/**'
- '!.github/workflows/docker-dist-tests.yml'
- '!.github/workflows/docker-reusable.yml'
- 'docker/**'
- '!docker/codex.Dockerfile'
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
inputs:
run_release_tests:
description: Run Release tests
required: false
type: boolean
default: false
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
nat_ip_auto: true
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_suffix: dist-tests
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
run_release_tests: ${{ inputs.run_release_tests }}
secrets: inherit

View File

@@ -34,6 +34,11 @@ on:
description: Set latest tag for Docker images
required: false
type: boolean
tag_stable:
default: false
description: Set stable tag for Docker images
required: false
type: boolean
tag_sha:
default: true
description: Set Git short commit as Docker tag
@@ -63,6 +68,10 @@ on:
description: Specifies compatible smart contract image
required: false
type: string
outputs:
codex_image:
description: Logos Storage Docker image tag
value: ${{ jobs.publish.outputs.codex_image }}
env:
@@ -73,11 +82,12 @@ env:
NIMFLAGS: ${{ inputs.nimflags }}
NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
TAG_LATEST: ${{ inputs.tag_latest }}
TAG_STABLE: ${{ inputs.tag_stable }}
TAG_SHA: ${{ inputs.tag_sha }}
TAG_SUFFIX: ${{ inputs.tag_suffix }}
CONTRACT_IMAGE: ${{ inputs.contract_image }}
# Tests
TESTS_SOURCE: codex-storage/cs-codex-dist-tests
TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
TESTS_BRANCH: master
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@@ -85,15 +95,16 @@ jobs:
jobs:
# Compute variables
compute:
name: Compute build ID
runs-on: ubuntu-latest
outputs:
build_id: ${{ steps.build_id.outputs.build_id }}
steps:
- name: Generate unique build id
id: build_id
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
name: Compute build ID
runs-on: ubuntu-latest
outputs:
build_id: ${{ steps.build_id.outputs.build_id }}
steps:
- name: Generate unique build id
id: build_id
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
# Build platform specific image
build:
@@ -128,7 +139,7 @@ jobs:
run: |
# Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi
- name: Docker - Meta
@@ -183,34 +194,35 @@ jobs:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.meta.outputs.version }}
codex_image: ${{ steps.image_tag.outputs.codex_image }}
needs: [build, compute]
steps:
- name: Docker - Variables
run: |
# Adjust custom suffix when set and
# Adjust custom suffix when set
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi
# Disable SHA tags on tagged release
if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
echo "TAG_SHA=false" >>$GITHUB_ENV
echo "TAG_SHA=false" >> $GITHUB_ENV
fi
# Handle latest and latest-custom using raw
if [[ ${{ env.TAG_SHA }} == "false" ]]; then
echo "TAG_LATEST=false" >>$GITHUB_ENV
echo "TAG_RAW=true" >>$GITHUB_ENV
echo "TAG_LATEST=false" >> $GITHUB_ENV
echo "TAG_RAW=true" >> $GITHUB_ENV
if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
else
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi
else
echo "TAG_RAW=false" >>$GITHUB_ENV
echo "TAG_RAW=false" >> $GITHUB_ENV
fi
# Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi
- name: Docker - Download digests
@@ -235,6 +247,7 @@ jobs:
tags: |
type=semver,pattern={{version}}
type=raw,enable=${{ env.TAG_RAW }},value=latest
type=raw,enable=${{ env.TAG_STABLE }},value=stable
type=sha,enable=${{ env.TAG_SHA }}
- name: Docker - Login to Docker Hub
@ -249,9 +262,12 @@ jobs:
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
- name: Docker - Image tag
id: image_tag
run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
- name: Docker - Inspect image
run: |
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
# Compute Tests inputs
@@ -300,7 +316,7 @@ jobs:
max-parallel: 1
matrix:
tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}
@@ -317,7 +333,7 @@ jobs:
name: Run Release Tests
needs: [compute-tests-inputs]
if: ${{ inputs.run_release_tests == 'true' }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}

View File

@@ -18,7 +18,6 @@ on:
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
@@ -32,7 +31,7 @@ jobs:
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
@@ -40,5 +39,6 @@ jobs:
needs: get-contracts-hash
with:
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
secrets: inherit

View File

@@ -52,7 +52,7 @@ jobs:
node-version: 18
- name: Build OpenAPI
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API"
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
- name: Build Postman Collection
run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false

View File

@@ -8,22 +8,21 @@ env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned
jobs:
matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }}
steps:
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix
id: matrix
run: |
echo 'matrix<<EOF' >> $GITHUB_OUTPUT
tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
build:
needs: matrix

View File

@@ -4,13 +4,15 @@ on:
push:
tags:
- 'v*.*.*'
branches:
- master
workflow_dispatch:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned
rust_version: 1.79.0
codex_binary_base: codex
storage_binary_base: storage
cirdl_binary_base: cirdl
build_dir: build
nim_flags: ''
@@ -30,7 +32,6 @@ jobs:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
@@ -72,18 +73,18 @@ jobs:
windows*) os_name="windows" ;;
esac
github_ref_name="${GITHUB_REF_NAME/\//-}"
codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
if [[ ${os_name} == "windows" ]]; then
codex_binary="${codex_binary}.exe"
storage_binary="${storage_binary}.exe"
cirdl_binary="${cirdl_binary}.exe"
fi
echo "codex_binary=${codex_binary}" >>$GITHUB_ENV
echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
- name: Release - Build
run: |
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}"
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
- name: Release - Libraries
@@ -94,11 +95,11 @@ jobs:
done
fi
- name: Release - Upload codex build artifacts
- name: Release - Upload Logos Storage build artifacts
uses: actions/upload-artifact@v4
with:
name: release-${{ env.codex_binary }}
path: ${{ env.build_dir }}/${{ env.codex_binary_base }}*
name: release-${{ env.storage_binary }}
path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
retention-days: 30
- name: Release - Upload cirdl build artifacts
@@ -138,7 +139,7 @@ jobs:
}
# Compress and prepare
for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do
for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only
@@ -170,6 +171,34 @@ jobs:
path: /tmp/release/
retention-days: 30
- name: Release - Upload to the cloud
env:
s3_endpoint: ${{ secrets.S3_ENDPOINT }}
s3_bucket: ${{ secrets.S3_BUCKET }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
run: |
# Variables
branch="${GITHUB_REF_NAME/\//-}"
folder="/tmp/release"
# Tagged releases
if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
echo "${branch}" > "${folder}"/latest
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
rm -f "${folder}"/latest
# master branch
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
# Custom branch
else
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
fi
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
@@ -183,6 +212,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.DISPATCH_PAT }}
repository: codex-storage/py-codex-api-client
repository: logos-storage/logos-storage-py-api-client
event-type: generate
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}'
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'
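The "Release - Upload to the cloud" step above creates a `releases/<tag>/` folder per tagged release plus a `releases/latest` object containing the newest tag name. A consumer could therefore resolve the latest binaries roughly as follows; the bucket and endpoint are placeholders for the `S3_BUCKET` and `S3_ENDPOINT` secrets:

```bash
# Sketch only: resolve and download the newest tagged release.
latest=$(aws s3 cp "s3://<bucket>/releases/latest" - --endpoint-url "<endpoint>")
aws s3 cp --recursive "s3://<bucket>/releases/${latest}" ./release \
  --endpoint-url "<endpoint>"
```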

.gitmodules vendored
View File

@@ -37,22 +37,17 @@
path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/questionable"]
path = vendor/questionable
url = https://github.com/status-im/questionable.git
ignore = untracked
branch = master
[submodule "vendor/upraises"]
path = vendor/upraises
url = https://github.com/markspanbroek/upraises.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/asynctest"]
path = vendor/asynctest
url = https://github.com/status-im/asynctest.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-presto"]
path = vendor/nim-presto
url = https://github.com/status-im/nim-presto.git
@@ -132,7 +127,7 @@
path = vendor/nim-websock
url = https://github.com/status-im/nim-websock.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi
@@ -160,13 +155,13 @@
path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git
ignore = untracked
branch = master
branch = stable
[submodule "vendor/nim-leopard"]
path = vendor/nim-leopard
url = https://github.com/status-im/nim-leopard.git
[submodule "vendor/nim-codex-dht"]
path = vendor/nim-codex-dht
url = https://github.com/codex-storage/nim-codex-dht.git
[submodule "vendor/logos-storage-nim-dht"]
path = vendor/logos-storage-nim-dht
url = https://github.com/logos-storage/logos-storage-nim-dht.git
ignore = untracked
branch = master
[submodule "vendor/nim-datastore"]
@@ -178,9 +173,11 @@
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = https://github.com/status-im/nim-eth
[submodule "vendor/codex-contracts-eth"]
path = vendor/codex-contracts-eth
url = https://github.com/status-im/codex-contracts-eth
[submodule "vendor/logos-storage-contracts-eth"]
path = vendor/logos-storage-contracts-eth
url = https://github.com/logos-storage/logos-storage-contracts-eth.git
ignore = untracked
branch = master
[submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization
@@ -195,26 +192,28 @@
url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"]
path = vendor/nim-poseidon2
url = https://github.com/codex-storage/nim-poseidon2.git
url = https://github.com/logos-storage/nim-poseidon2.git
ignore = untracked
branch = master
[submodule "vendor/constantine"]
path = vendor/constantine
url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"]
path = vendor/nim-circom-compat
url = https://github.com/codex-storage/nim-circom-compat.git
url = https://github.com/logos-storage/nim-circom-compat.git
ignore = untracked
branch = master
[submodule "vendor/codex-storage-proofs-circuits"]
path = vendor/codex-storage-proofs-circuits
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
[submodule "vendor/logos-storage-proofs-circuits"]
path = vendor/logos-storage-proofs-circuits
url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
ignore = untracked
branch = master
[submodule "vendor/nim-serde"]
path = vendor/nim-serde
url = https://github.com/codex-storage/nim-serde.git
url = https://github.com/logos-storage/nim-serde.git
[submodule "vendor/nim-leveldbstatic"]
path = vendor/nim-leveldbstatic
url = https://github.com/codex-storage/nim-leveldb.git
url = https://github.com/logos-storage/nim-leveldb.git
[submodule "vendor/nim-zippy"]
path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git
@@ -225,9 +224,9 @@
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = master
branch = main

Jenkinsfile vendored
View File

@@ -25,7 +25,7 @@ pipeline {
stage('Check') {
steps {
script {
sh './result/bin/codex --version'
sh './result/bin/storage --version'
}
}
}

View File

@@ -93,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.'
# Builds the codex binary
# Builds the Logos Storage binary
all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
@@ -232,6 +232,7 @@ format:
$(NPH) *.nim
$(NPH) codex/
$(NPH) tests/
$(NPH) library/
clean-nph:
rm -f $(NPH)
@@ -242,4 +243,32 @@ print-nph-path:
clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
endif
endif # "variables.mk" was not included

View File

@@ -1,22 +1,22 @@
# Codex Decentralized Durability Engine
# Logos Storage Decentralized Engine
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval.
> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
[![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
For detailed instructions on preparing to build logos-storage-nim, see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
To build the project, clone it and run:
@@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
Run the client with:
```bash
build/codex
build/storage
```
## Configuration
It is possible to configure a Codex node in several ways:
It is possible to configure a Logos Storage node in several ways:
1. CLI options
2. Environment variables
3. Configuration file
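As a quick, hedged illustration, the same setting supplied each way might look as follows; `--log-level` appears elsewhere in this diff, the `STORAGE_` prefix follows the `envVarsPrefix = "storage"` change to `codex.nim` below, and the config-file flag and format are assumptions:

```bash
# 1. CLI option
build/storage --log-level=TRACE

# 2. Environment variable (assumed STORAGE_ prefix per envVarsPrefix)
STORAGE_LOG_LEVEL=TRACE build/storage

# 3. Configuration file (flag name and format assumed for illustration)
build/storage --config-file=storage.toml
```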
@@ -45,21 +45,71 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
## Guides
To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
To get acquainted with Logos Storage, consider:
* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start; and
* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API
The client exposes a REST API that can be used to interact with it. An overview of the API can be found at [api.codex.storage](https://api.codex.storage).
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
Currently, only a Go binding is included.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
### Run the Go example
Build the Go example:
```bash
go build -o storage-go examples/golang/storage.go
```
Export the library path:
```bash
export LD_LIBRARY_PATH=build
```
Run the example:
```bash
./storage-go
```
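Note that `LD_LIBRARY_PATH` only affects the Linux loader. On macOS, where the Makefile builds `libstorage.dylib`, the equivalent is:

```bash
export DYLD_LIBRARY_PATH=build
./storage-go
```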
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
### Limitation
Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed.
## Contributing and development
Feel free to dive in; contributions are welcome! Open an issue or submit PRs.
### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) to format its code, and adherence to its styling is required.
On a fresh setup, run `make build-nph` to get `nph`.
To format files, run `make nph/<file or folder you want to format>`.
Optionally, install a Git pre-commit hook with `make install-nph-commit`, which will format modified files before they are committed.

View File

@@ -10,17 +10,17 @@ nim c -r run_benchmarks
```
By default all circuit files for each combinations of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definition.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI
## Logos Storage Ark Circom CLI
Runs Codex's prover setup with Ark / Circom.
Runs Logos Storage's prover setup with Ark / Circom.
Compile:
```sh

View File

@@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir()
result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli"
result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
@@ -118,7 +118,7 @@ proc createCircuit*(
##
## All needed circuit files will be generated as needed.
## They will be located in `circBenchDir` which defaults to a folder like:
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## with all the given CircuitArgs.
##
let circdir = circBenchDir

View File

@@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
)
benchRuns[benchmarkName] = (runs.avg(), count)
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
if printRegular:
echo ""
for k, v in benchRuns:
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
if printTsv:
echo ""
echo "name", "\t", "avgTimeSec", "\t", "count"
for k, v in benchRuns:
echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math
func floorLog2*(x: int): int =

View File

@@ -3,7 +3,7 @@ mode = ScriptMode.Verbose
import std/os except commandLineParams
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
if not dirExists "build":
mkDir "build"
@@ -18,57 +18,82 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
let
# Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim"
srcName & ".nim"
exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, srcDir, params
exec "build/" & name
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
if not dirExists "build":
mkDir "build"
task codex, "build codex binary":
if `type` == "dynamic":
let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " &
"-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " &
params & " " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task storage, "build logos storage binary":
buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl"
task testCodex, "Build & run Codex tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true"
task testStorage, "Build & run Logos Storage tests":
test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
task testContracts, "Build & run Codex Contract tests":
task testContracts, "Build & run Logos Storage Contract tests":
test "testContracts"
task testIntegration, "Run integration tests":
buildBinary "codex",
outName = "storage",
params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
test "testIntegration"
# use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary":
codexTask()
task build, "build Logos Storage binary":
storageTask()
task test, "Run tests":
testCodexTask()
testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask()
testStorageTask()
testContractsTask()
testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
codexTask()
storageTask()
test "testTaiko"
import strutils
@@ -101,7 +126,7 @@ task coverage, "generates code coverage report":
test "coverage",
srcDir = "tests/",
params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
" --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c")
rmDir("coverage")
mkDir("coverage")
@@ -121,3 +146,23 @@ task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "":
exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -45,7 +45,7 @@ when isMainModule:
let config = CodexConf.load(
version = codexFullVersion,
envVarsPrefix = "codex",
envVarsPrefix = "storage",
secondarySources = proc(
config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} =
@@ -54,6 +54,16 @@
,
)
config.setupLogging()
try:
updateLogLevel(config.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
config.setupMetrics()
if not (checkAndCreateDataDir((config.dataDir).string)):
@@ -89,15 +99,15 @@ when isMainModule:
try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure
## Ctrl+C handling
proc doShutdown() =
shutdown = server.stop()
shutdown = server.shutdown()
state = CodexStatus.Stopping
notice "Stopping Codex"
notice "Stopping Logos Storage"
proc controlCHandler() {.noconv.} =
when defined(windows):
@@ -128,7 +138,7 @@ when isMainModule:
try:
waitFor server.start()
except CatchableError as error:
error "Codex failed to start", error = error.msg
error "Logos Storage failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they
@@ -149,7 +159,7 @@
# be assigned before state switches to Stopping
waitFor shutdown
except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg
error "Logos Storage didn't shutdown correctly", error = error.msg
quit QuitFailure
notice "Exited codex"
notice "Exited Storage"

View File

@@ -1,5 +1,5 @@
version = "0.1.0"
author = "Codex Team"
author = "Logos Storage Team"
description = "p2p data durability engine"
license = "MIT"
binDir = "build"

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -124,6 +124,10 @@ proc start*(b: Advertiser) {.async: (raises: []).} =
trace "Advertiser start"
# The advertiser is expected to be started only once.
if b.advertiserRunning:
raiseAssert "Advertiser can only be started once — this should not happen"
proc onBlock(cid: Cid) {.async: (raises: []).} =
try:
await b.advertiseBlock(cid)
@@ -133,10 +137,6 @@ proc start*(b: Advertiser) {.async: (raises: []).} =
doAssert(b.localStore.onBlockStored.isNone())
b.localStore.onBlockStored = onBlock.some
if b.advertiserRunning:
warn "Starting advertiser twice"
return
b.advertiserRunning = true
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -8,6 +8,7 @@
## those terms.
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/libp2p/cid
@@ -38,6 +39,7 @@
DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds
type DiscoveryEngine* = ref object of RootObj
@@ -51,11 +53,32 @@ type DiscoveryEngine* = ref object of RootObj
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block
minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try:
while b.discEngineRunning:
@@ -78,8 +101,16 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
trace "Discovery request already in progress", cid
continue
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
@@ -156,6 +187,7 @@ proc new*(
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
##
@@ -171,4 +203,5 @@
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
)

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -12,12 +12,14 @@ import std/sets
import std/options
import std/algorithm
import std/sugar
import std/random
import pkg/chronos
import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics
import pkg/stint
import pkg/questionable
import pkg/stew/shims/sets
import ../../rng
import ../../stores/blockstore
@@ -63,30 +65,59 @@ declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sen
declareCounter(
codex_block_exchange_blocks_received, "codex blockexchange blocks received"
)
declareCounter(
codex_block_exchange_spurious_blocks_received,
"codex blockexchange unrequested/duplicate blocks received",
)
declareCounter(
codex_block_exchange_discovery_requests_total,
"Total number of peer discovery requests sent",
)
declareCounter(
codex_block_exchange_peer_timeouts_total, "Total number of peer activity timeouts"
)
declareCounter(
codex_block_exchange_requests_failed_total,
"Total number of block requests that failed after exhausting retries",
)
const
DefaultMaxPeersPerRequest* = 10
# The default max message length of nim-libp2p is 100 megabytes, meaning we can
# in principle fit up to 1600 64k blocks per message, so 20 is well under
# that number.
DefaultMaxBlocksPerMessage = 20
DefaultTaskQueueSize = 100
DefaultConcurrentTasks = 10
# Don't do more than one discovery request per `DiscoveryRateLimit` seconds.
DiscoveryRateLimit = 3.seconds
DefaultPeerActivityTimeout = 1.minutes
# Match MaxWantListBatchSize to efficiently respond to incoming WantLists
PresenceBatchSize = MaxWantListBatchSize
CleanupBatchSize = 2048
type
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
PeerSelector* =
proc(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx {.gcsafe, raises: [].}
BlockExcEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
network*: BlockExcNetwork # Petwork interface
network*: BlockExcNetwork # Network interface
peers*: PeerCtxStore # Peers we're currently actively exchanging with
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
# Peers we're currently processing tasks for
selectPeer*: PeerSelector # Strategy for picking the peer to request a block from
concurrentTasks: int # Number of concurrent peers we're serving at any given time
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running
maxBlocksPerMessage: int
# Maximum number of blocks we can squeeze in a single message
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
discovery*: DiscoveryEngine
advertiser*: Advertiser
lastDiscRequest: Moment # time of last discovery request
Pricing* = object
address*: EthAddress
@@ -104,7 +135,6 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task
##
await self.discovery.start()
await self.advertiser.start()
@@ -154,8 +184,145 @@ sendWantBlock(
) # we want this remote to send us a block
codex_block_exchange_want_block_lists_sent.inc()
proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
Rng.instance.sample(peers)
proc sendBatchedWantList(
self: BlockExcEngine,
peer: BlockExcPeerCtx,
addresses: seq[BlockAddress],
full: bool,
) {.async: (raises: [CancelledError]).} =
var offset = 0
while offset < addresses.len:
let batchEnd = min(offset + MaxWantListBatchSize, addresses.len)
let batch = addresses[offset ..< batchEnd]
trace "Sending want list batch",
peer = peer.id,
batchSize = batch.len,
offset = offset,
total = addresses.len,
full = full
await self.network.request.sendWantList(
peer.id, batch, full = (full and offset == 0)
)
for address in batch:
peer.lastSentWants.incl(address)
offset = batchEnd
proc refreshBlockKnowledge(
self: BlockExcEngine, peer: BlockExcPeerCtx, skipDelta = false, resetBackoff = false
) {.async: (raises: [CancelledError]).} =
if peer.lastSentWants.len > 0:
var toRemove: seq[BlockAddress]
for address in peer.lastSentWants:
if address notin self.pendingBlocks:
toRemove.add(address)
if toRemove.len >= CleanupBatchSize:
await idleAsync()
break
for address in toRemove:
peer.lastSentWants.excl(address)
if self.pendingBlocks.wantListLen == 0:
if peer.lastSentWants.len > 0:
trace "Clearing want list tracking, no pending blocks", peer = peer.id
peer.lastSentWants.clear()
return
# We only ask for blocks that the peer hasn't already told us it has.
let
peerHave = peer.peerHave
toAsk = toHashSet(self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave))
if toAsk.len == 0:
if peer.lastSentWants.len > 0:
trace "Clearing want list tracking, peer has all blocks", peer = peer.id
peer.lastSentWants.clear()
return
let newWants = toAsk - peer.lastSentWants
if peer.lastSentWants.len > 0 and not skipDelta:
if newWants.len > 0:
trace "Sending delta want list update",
peer = peer.id, newWants = newWants.len, totalWants = toAsk.len
await self.sendBatchedWantList(peer, newWants.toSeq, full = false)
if resetBackoff:
peer.wantsUpdated
else:
trace "No changes in want list, skipping send", peer = peer.id
peer.lastSentWants = toAsk
else:
trace "Sending full want list", peer = peer.id, length = toAsk.len
await self.sendBatchedWantList(peer, toAsk.toSeq, full = true)
if resetBackoff:
peer.wantsUpdated
proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledError]).} =
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for peer in self.peers.peers.values.toSeq:
# We refresh block knowledge if:
# 1. the peer hasn't been refreshed in a while;
# 2. the list of blocks we care about has changed.
#
# Note that because of (2), it is important that we update our
# want list in the coarsest way possible instead of over many
# small updates.
#
# In dynamic swarms, staleness will dominate latency.
let
hasNewBlocks = peer.lastRefresh < self.pendingBlocks.lastInclusion
isKnowledgeStale = peer.isKnowledgeStale
if isKnowledgeStale or hasNewBlocks:
if not peer.refreshInProgress:
peer.refreshRequested()
await self.refreshBlockKnowledge(
peer, skipDelta = isKnowledgeStale, resetBackoff = hasNewBlocks
)
else:
trace "Not refreshing: peer is up to date", peer = peer.id
if (Moment.now() - lastIdle) >= runtimeQuota:
try:
await idleAsync()
except CancelledError:
discard
lastIdle = Moment.now()
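The loop above uses a cooperative-yield pattern that recurs throughout this diff (also in blocksDeliveryHandler and wantListHandler): long iterations call chronos' idleAsync() at most once per runtimeQuota so other event-loop tasks are not starved. A minimal standalone sketch of the pattern, with a placeholder for the per-item work:

import pkg/chronos

proc processAll[T](items: seq[T], work: proc(item: T) {.gcsafe.}) {.async.} =
  let runtimeQuota = 10.milliseconds
  var lastIdle = Moment.now()
  for item in items:
    work(item) # stand-in for the real per-item processing
    if (Moment.now() - lastIdle) >= runtimeQuota:
      await idleAsync() # yield so other tasks can run
      lastIdle = Moment.now()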
proc searchForNewPeers(self: BlockExcEngine, cid: Cid) =
if self.lastDiscRequest + DiscoveryRateLimit < Moment.now():
trace "Searching for new peers for", cid = cid
codex_block_exchange_discovery_requests_total.inc()
self.lastDiscRequest = Moment.now() # always refresh before calling await!
self.discovery.queueFindBlocksReq(@[cid])
else:
trace "Not searching for new peers, rate limit not expired", cid = cid
proc evictPeer(self: BlockExcEngine, peer: PeerId) =
## Cleanup disconnected peer
##
trace "Evicting disconnected/departed peer", peer
let peerCtx = self.peers.get(peer)
if not peerCtx.isNil:
for address in peerCtx.blocksRequested:
self.pendingBlocks.clearRequest(address, peer.some)
# drop the peer from the peers table
self.peers.remove(peer)
proc downloadInternal(
self: BlockExcEngine, address: BlockAddress
@ -173,41 +340,147 @@ proc downloadInternal(
if self.pendingBlocks.retriesExhausted(address):
trace "Error retries exhausted"
codex_block_exchange_requests_failed_total.inc()
handle.fail(newException(RetriesExhaustedError, "Error retries exhausted"))
break
trace "Running retry handle"
let peers = self.peers.getPeersForBlock(address)
logScope:
peersWith = peers.with.len
peersWithout = peers.without.len
trace "Peers for block"
if peers.with.len > 0:
self.pendingBlocks.setInFlight(address, true)
await self.sendWantBlock(@[address], peers.with.randomPeer)
else:
self.pendingBlocks.setInFlight(address, false)
if peers.with.len == 0:
# We know of no peers that have the block.
if peers.without.len > 0:
await self.sendWantHave(@[address], peers.without)
self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
# If we have peers connected but none of them have the block, this
# could be because our knowledge about what they have has run stale.
# Tries to refresh it.
await self.refreshBlockKnowledge()
# Also tries to look for new peers for good measure.
# TODO: in the future, peer search and knowledge maintenance should
# be completely decoupled from one another. It is very hard to
# control what happens and how many neighbors we get like this.
self.searchForNewPeers(address.cidOrTreeCid)
await (handle or sleepAsync(self.pendingBlocks.retryInterval))
let nextDiscovery =
if self.lastDiscRequest + DiscoveryRateLimit > Moment.now():
(self.lastDiscRequest + DiscoveryRateLimit - Moment.now())
else:
0.milliseconds
let retryDelay =
max(secs(rand(self.pendingBlocks.retryInterval.secs)), nextDiscovery)
# We now wait for a bit and then retry. If the handle gets completed in the
# meantime (cause the presence handler might have requested the block and
# received it in the meantime), we are done. Retry delays are randomized
# so we don't get all block loops spinning at the same time.
await handle or sleepAsync(retryDelay)
if handle.finished:
break
# Without decrementing the retries count, this would infinitely loop
# trying to find peers.
self.pendingBlocks.decRetries(address)
# If we still don't have the block, we'll go for another cycle.
trace "No peers for block, will retry shortly"
continue
# Once again, it might happen that the block was requested to a peer
# in the meantime. If so, we don't need to do anything. Otherwise,
# we'll be the ones placing the request.
let scheduledPeer =
if not self.pendingBlocks.isRequested(address):
let peer = self.selectPeer(peers.with)
discard self.pendingBlocks.markRequested(address, peer.id)
peer.blockRequestScheduled(address)
trace "Request block from block retry loop"
await self.sendWantBlock(@[address], peer)
peer
else:
let peerId = self.pendingBlocks.getRequestPeer(address).get()
self.peers.get(peerId)
if scheduledPeer.isNil:
trace "Scheduled peer no longer available, clearing stale request", address
self.pendingBlocks.clearRequest(address)
continue
# Parks until either the block is received, or the peer times out.
let activityTimer = scheduledPeer.activityTimer()
await handle or activityTimer # TODO: or peerDropped
activityTimer.cancel()
# XXX: we should probably not have this. Blocks should be retried
# to infinity unless cancelled by the client.
self.pendingBlocks.decRetries(address)
if handle.finished:
trace "Handle for block finished", failed = handle.failed
break
else:
# If the peer timed out, retries immediately.
trace "Peer timed out during block request", peer = scheduledPeer.id
codex_block_exchange_peer_timeouts_total.inc()
await self.network.dropPeer(scheduledPeer.id)
# Evicts peer immediately or we may end up picking it again in the
# next retry.
self.evictPeer(scheduledPeer.id)
except CancelledError as exc:
trace "Block download cancelled"
if not handle.finished:
await handle.cancelAndWait()
except RetriesExhaustedError as exc:
warn "Retries exhausted for block", address, exc = exc.msg
codex_block_exchange_requests_failed_total.inc()
if not handle.finished:
handle.fail(exc)
finally:
self.pendingBlocks.setInFlight(address, false)
self.pendingBlocks.clearRequest(address)
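The randomized wait in the retry loop above both jitters retries (so block loops don't spin in lockstep) and respects the discovery rate limiter. A minimal standalone rendering of just that computation, assuming chronos Durations and std/random as the loop itself does (the proc name is ours):

import std/random
import pkg/chronos

proc nextRetryDelay(retryInterval, discoveryCooldown: Duration): Duration =
  # uniform delay in [0, retryInterval], but never shorter than the time
  # remaining on the discovery rate limiter
  max(secs(rand(retryInterval.secs)), discoveryCooldown)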
proc requestBlocks*(
self: BlockExcEngine, addresses: seq[BlockAddress]
): SafeAsyncIter[Block] =
var handles: seq[BlockHandle]
# Adds all blocks to pendingBlocks before calling the first downloadInternal. This will
# ensure that we don't send incomplete want lists.
for address in addresses:
if address notin self.pendingBlocks:
handles.add(self.pendingBlocks.getWantHandle(address))
for address in addresses:
self.trackedFutures.track(self.downloadInternal(address))
let totalHandles = handles.len
var completed = 0
proc isFinished(): bool =
completed == totalHandles
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
# Be it success or failure, we're completing this future.
let value =
try:
# FIXME: this is super expensive. We're doing several linear scans,
# not to mention all the copying and callback fumbling in `one`.
let
handle = await one(handles)
i = handles.find(handle)
handles.del(i)
success await handle
except CancelledError as err:
warn "Block request cancelled", addresses, err = err.msg
raise err
except CatchableError as err:
error "Error getting blocks from exchange engine", addresses, err = err.msg
failure err
inc(completed)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
proc requestBlock*(
self: BlockExcEngine, address: BlockAddress
@ -230,63 +503,73 @@ proc requestBlock*(
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
self.requestBlock(BlockAddress.init(cid))
proc completeBlock*(self: BlockExcEngine, address: BlockAddress, blk: Block) =
if address in self.pendingBlocks.blocks:
self.pendingBlocks.completeWantHandle(address, blk)
else:
warn "Attempted to complete non-pending block", address
proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
trace "Received block presence from peer", peer, len = blocks.len
let
peerCtx = self.peers.get(peer)
ourWantList = toSeq(self.pendingBlocks.wantList)
ourWantList = toHashSet(self.pendingBlocks.wantList.toSeq)
if peerCtx.isNil:
return
peerCtx.refreshReplied()
for blk in blocks:
if presence =? Presence.init(blk):
peerCtx.setPresence(presence)
let
peerHave = peerCtx.peerHave
dontWantCids = peerHave.filterIt(it notin ourWantList)
dontWantCids = peerHave - ourWantList
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)
peerCtx.cleanPresence(dontWantCids.toSeq)
let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it)
)
self.pendingBlocks.markRequested(it, peer)
).toSeq
for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
peerCtx.blockRequestScheduled(address)
if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
# FIXME: this will result in duplicate requests for blocks
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg
for address in ourWantCids:
self.pendingBlocks.clearRequest(address, peer.some)
proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to
for p in self.peers:
for c in cids: # for each cid
for blockDelivery in blocksDelivery: # for each delivered block
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWantsCids:
if blockDelivery.address in p.wantedBlocks:
let cid = blockDelivery.blk.cid
try:
if await (c in self.localStore):
if await (cid in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p)
break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
warn "Checking local store canceled", cid = cid, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
error "Error checking local store for cid", cid = cid, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"
proc cancelBlocks(
@ -295,28 +578,45 @@ proc cancelBlocks(
## Tells neighboring peers that we're no longer interested in a block.
##
let blocksDelivered = toHashSet(addrs)
var scheduledCancellations: Table[PeerId, HashSet[BlockAddress]]
if self.peers.len == 0:
return
trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
proc dispatchCancellations(
entry: tuple[peerId: PeerId, addresses: HashSet[BlockAddress]]
): Future[PeerId] {.async: (raises: [CancelledError]).} =
trace "Sending block request cancellations to peer",
peer = entry.peerId, addresses = entry.addresses.len
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
peer = entry.peerId, addresses = entry.addresses.toSeq
)
return peerCtx
return entry.peerId
try:
let (succeededFuts, failedFuts) = await allFinishedFailed[BlockExcPeerCtx](
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
)
for peerCtx in self.peers.peers.values:
# Do we have pending requests towards this peer for any of the blocks
# that were just delivered?
let intersection = peerCtx.blocksRequested.intersection(blocksDelivered)
if intersection.len > 0:
# If so, schedules a cancellation.
scheduledCancellations[peerCtx.id] = intersection
if scheduledCancellations.len == 0:
return
let (succeededFuts, failedFuts) = await allFinishedFailed[PeerId](
toSeq(scheduledCancellations.pairs).map(dispatchCancellations)
)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerId: PeerId):
let ctx = self.peers.get(peerId)
if not ctx.isNil:
ctx.cleanPresence(addrs)
for address in scheduledCancellations[peerId]:
ctx.blockRequestCancelled(address)
if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
@ -386,17 +686,31 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
return success()
proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
self: BlockExcEngine,
peer: PeerId,
blocksDelivery: seq[BlockDelivery],
allowSpurious: bool = false,
) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery]
let peerCtx = self.peers.get(peer)
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for bd in blocksDelivery:
logScope:
peer = peer
address = bd.address
try:
# Unknown peers and unrequested blocks are dropped with a warning.
if not allowSpurious and (peerCtx == nil or not peerCtx.blockReceived(bd.address)):
warn "Dropping unrequested or duplicate block received from peer"
codex_block_exchange_spurious_blocks_received.inc()
continue
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
@ -416,15 +730,25 @@ proc blocksDeliveryHandler*(
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CancelledError:
trace "Block delivery handling cancelled"
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue
validatedBlocksDelivery.add(bd)
if (Moment.now() - lastIdle) >= runtimeQuota:
try:
await idleAsync()
except CancelledError:
discard
except CatchableError:
discard
lastIdle = Moment.now()
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = self.peers.get(peer)
if peerCtx != nil:
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
@ -448,16 +772,17 @@ proc wantListHandler*(
presence: seq[BlockPresence]
schedulePeer = false
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
try:
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants
if e.address notin peerCtx.wantedBlocks: # Adding new entry to peer wants
let
have =
try:
@ -468,6 +793,8 @@ proc wantListHandler*(
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel:
# This is sort of expected if we sent the block to the peer, as we have removed
# it from the peer's wantlist ourselves.
trace "Received cancelation for untracked block, skipping",
address = e.address
continue
@ -476,12 +803,14 @@ proc wantListHandler*(
case e.wantType
of WantType.WantHave:
if have:
trace "We HAVE the block", address = e.address
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
trace "We DON'T HAVE the block", address = e.address
if e.sendDontHave:
presence.add(
BlockPresence(
@ -491,28 +820,35 @@ proc wantListHandler*(
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
peerCtx.wantedBlocks.incl(e.address)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
peerCtx.wantedBlocks.excl(e.address)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
address = e.address, len = peerCtx.wantedBlocks.len
else:
trace "Peer has requested a block more than once", address = e.address
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
if presence.len >= PresenceBatchSize or (Moment.now() - lastIdle) >= runtimeQuota:
if presence.len > 0:
trace "Sending presence batch to remote", items = presence.len
await self.network.request.sendPresence(peer, presence)
presence = @[]
try:
await idleAsync()
except CancelledError:
discard
lastIdle = Moment.now()
# Send any remaining presence messages
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
trace "Sending final presence to remote", items = presence.len
await self.network.request.sendPresence(peer, presence)
if schedulePeer:
@ -544,7 +880,7 @@ proc paymentHandler*(
else:
context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*(
proc peerAddedHandler*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want
@ -554,88 +890,85 @@ proc setupPeer*(
trace "Setting up peer", peer
if peer notin self.peers:
let peerCtx = BlockExcPeerCtx(id: peer, activityTimeout: DefaultPeerActivityTimeout)
trace "Setting up new peer", peer
self.peers.add(BlockExcPeerCtx(id: peer))
self.peers.add(peerCtx)
trace "Added peer", peers = self.peers.len
# broadcast our want list, the other peer will do the same
if self.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(self.pendingBlocks.wantList)
await self.network.request.sendWantList(peer, cids, full = true)
await self.refreshBlockKnowledge(peerCtx)
if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer
##
proc localLookup(
self: BlockExcEngine, address: BlockAddress
): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} =
if address.leaf:
(await self.localStore.getBlockAndProof(address.treeCid, address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(address: address, blk: blkAndProof[0], proof: blkAndProof[1].some)
)
else:
(await self.localStore.getBlock(address)).map(
(blk: Block) => BlockDelivery(address: address, blk: blk, proof: CodexProof.none)
)
trace "Dropping peer", peer
iterator splitBatches[T](sequence: seq[T], batchSize: int): seq[T] =
var batch: seq[T]
for element in sequence:
if batch.len == batchSize:
yield batch
batch = @[]
batch.add(element)
# drop the peer from the peers table
self.peers.remove(peer)
if batch.len > 0:
yield batch
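A quick illustration of the iterator's chunking behaviour (hypothetical usage from the same module, since splitBatches is not exported): the final batch may be shorter than batchSize.

import std/sequtils

let batches = toSeq(splitBatches(@[1, 2, 3, 4, 5], 2))
assert batches == @[@[1, 2], @[3, 4], @[5]]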
proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
self: BlockExcEngine, peerCtx: BlockExcPeerCtx
) {.async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send to the peer blocks he wants to get,
# if they are present in our local store
# TODO: There should be all sorts of accounting of
# bytes sent/received here
# Blocks that have been sent have already been picked up by other tasks and
# should not be re-sent.
var
wantedBlocks = peerCtx.wantedBlocks.filterIt(not peerCtx.isBlockSent(it))
sent: HashSet[BlockAddress]
var wantsBlocks =
task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
trace "Running task for peer", peer = peerCtx.id
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
for peerWant in task.peerWants.mitems:
if peerWant.address in addresses:
peerWant.inFlight = inFlight
for wantedBlock in wantedBlocks:
peerCtx.markBlockAsSent(wantedBlock)
if wantsBlocks.len > 0:
# Mark wants as in-flight.
let wantAddresses = wantsBlocks.mapIt(it.address)
updateInFlight(wantAddresses, true)
wantsBlocks.sort(SortOrder.Descending)
try:
for batch in wantedBlocks.toSeq.splitBatches(self.maxBlocksPerMessage):
var blockDeliveries: seq[BlockDelivery]
for wantedBlock in batch:
# I/O is blocking so looking up blocks sequentially is fine.
without blockDelivery =? await self.localLookup(wantedBlock), err:
error "Error getting block from local store",
err = err.msg, address = wantedBlock
peerCtx.markBlockAsNotSent(wantedBlock)
continue
blockDeliveries.add(blockDelivery)
sent.incl(wantedBlock)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
if e.address.leaf:
(await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
)
)
else:
(await self.localStore.getBlock(e.address)).map(
(blk: Block) =>
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
if blockDeliveries.len == 0:
continue
let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"
# All the wants that failed local lookup must be set to not-in-flight again.
let
successAddresses = blocksDelivery.mapIt(it.address)
failedAddresses = wantAddresses.filterIt(it notin successAddresses)
updateInFlight(failedAddresses, false)
if blocksDelivery.len > 0:
trace "Sending blocks to peer",
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
task.peerWants.keepItIf(it.address notin successAddresses)
await self.network.request.sendBlocksDelivery(peerCtx.id, blockDeliveries)
codex_block_exchange_blocks_sent.inc(blockDeliveries.len.int64)
# Drops the batch from the peer's set of wanted blocks; i.e. assumes that after
# we send the blocks, the peer no longer wants them, so we don't need to
# re-send them. Note that the send might still fail down the line and we will
# have removed those anyway. At that point, we rely on the requester performing
# a retry for the request to succeed.
peerCtx.wantedBlocks.keepItIf(it notin sent)
finally:
# Better safe than sorry: if an exception does happen, we don't want to keep
# those as sent, as it'll effectively prevent the blocks from ever being sent again.
peerCtx.blocksSent.keepItIf(it notin wantedBlocks)
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
## process tasks
@ -646,11 +979,47 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx)
except CancelledError:
trace "block exchange task runner cancelled"
except CatchableError as exc:
error "error running block exchange task", error = exc.msg
info "Exiting blockexc task runner"
proc selectRandom*(
peers: seq[BlockExcPeerCtx]
): BlockExcPeerCtx {.gcsafe, raises: [].} =
if peers.len == 1:
return peers[0]
proc evalPeerScore(peer: BlockExcPeerCtx): float =
let
loadPenalty = peer.blocksRequested.len.float * 2.0
successRate =
if peer.exchanged > 0:
peer.exchanged.float / (peer.exchanged + peer.blocksRequested.len).float
else:
0.5
failurePenalty = (1.0 - successRate) * 5.0
return loadPenalty + failurePenalty
let
scores = peers.mapIt(evalPeerScore(it))
maxScore = scores.max() + 1.0
weights = scores.mapIt(maxScore - it)
var totalWeight = 0.0
for w in weights:
totalWeight += w
var r = rand(totalWeight)
for i, weight in weights:
r -= weight
if r <= 0.0:
return peers[i]
return peers[^1]
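A toy walk-through of the inverse-score weighting above (values chosen for illustration only): lower-scoring peers get proportionally more weight, and even the worst peer keeps a weight of at least 1.0, so none is ever fully starved of selection.

import std/sequtils

let scores = @[0.0, 3.0, 7.0] # e.g. an idle peer, a busy peer, a flaky peer
let maxScore = scores.max() + 1.0 # 8.0
let weights = scores.mapIt(maxScore - it) # @[8.0, 5.0, 1.0]
# selection probabilities are then roughly 8/14, 5/14 and 1/14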
proc new*(
T: type BlockExcEngine,
localStore: BlockStore,
@ -660,7 +1029,9 @@ proc new*(
advertiser: Advertiser,
peerStore: PeerCtxStore,
pendingBlocks: PendingBlocksManager,
maxBlocksPerMessage = DefaultMaxBlocksPerMessage,
concurrentTasks = DefaultConcurrentTasks,
selectPeer: PeerSelector = selectRandom,
): BlockExcEngine =
## Create new block exchange engine instance
##
@ -673,23 +1044,13 @@ proc new*(
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures(),
maxBlocksPerMessage: maxBlocksPerMessage,
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiser: advertiser,
selectPeer: selectPeer,
)
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId)
else:
self.dropPeer(peerId)
if not isNil(network.switch):
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
@ -715,12 +1076,24 @@ proc new*(
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)
proc peerAddedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
await self.peerAddedHandler(peer)
proc peerDepartedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
self.evictPeer(peer)
network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler,
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler,
onPeerJoined: peerAddedHandler,
onPeerDeparted: peerDepartedHandler,
)
return self

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -34,7 +34,7 @@ declareGauge(
const
DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis
DefaultRetryInterval* = 2.seconds
type
RetriesExhaustedError* = object of CatchableError
@ -42,7 +42,7 @@ type
BlockReq* = object
handle*: BlockHandle
inFlight*: bool
requested*: ?PeerId
blockRetries*: int
startTime*: int64
@ -50,12 +50,13 @@ type
blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
self: PendingBlocksManager, address: BlockAddress, inFlight = false
self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block
##
@ -65,11 +66,13 @@ proc getWantHandle*(
do:
let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
requested: requested,
blockRetries: self.blockRetries,
startTime: getMonoTime().ticks,
)
self.blocks[address] = blk
self.lastInclusion = Moment.now()
let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} =
@ -86,9 +89,22 @@ proc getWantHandle*(
return handle
proc getWantHandle*(
self: PendingBlocksManager, cid: Cid, inFlight = false
self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight)
self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
## Complete a pending want handle
self.blocks.withValue(address, blockReq):
if not blockReq[].handle.finished:
trace "Completing want handle from provided block", address
blockReq[].handle.complete(blk)
else:
trace "Want handle already completed", address
do:
trace "No pending want handle found for address", address
proc resolve*(
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
@ -108,9 +124,6 @@ proc resolve*(
blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else:
trace "Block handle already finished", address = bd.address
@ -128,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
## Set inflight status for a block
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block has been requested to a peer
##
result = false
self.blocks.withValue(address, pending):
result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
## Returns the peer that requested this block
##
result = PeerId.none
self.blocks.withValue(address, pending):
result = pending[].requested
proc markRequested*(
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
## Marks this block as having been requested to a peer
##
self.blocks.withValue(address, pending):
pending[].inFlight = inFlight
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##
if self.isRequested(address):
return false
self.blocks.withValue(address, pending):
result = pending[].inFlight
pending[].requested = peer.some
return true
proc clearRequest*(
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
self.blocks.withValue(address, pending):
if peer.isSome:
assert peer == pending[].requested
pending[].requested = PeerId.none
func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks
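The requested field effectively turns the manager into an ownership registry: markRequested succeeds only for a pending, unowned block, and clearRequest releases the claim. A hypothetical illustration (the proc and parameter names are ours, not from the diff):

import pkg/questionable

proc illustrateOwnership(
    pending: PendingBlocksManager, address: BlockAddress, peerA, peerB: PeerId
) =
  if pending.markRequested(address, peerA): # true if pending and unowned
    assert pending.getRequestPeer(address) == peerA.some
    assert not pending.markRequested(address, peerB) # already owned by peerA
    pending.clearRequest(address, peerA.some) # releases the claim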

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -35,15 +35,14 @@ const
DefaultMaxInflight* = 100
type
WantListHandler* =
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
@ -51,6 +50,9 @@ type
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc(
id: PeerId,
@ -240,96 +242,116 @@ proc handlePayment(
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
b.trackedFutures.track(b.handleWantList(peer, msg.wantList))
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0:
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload))
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0:
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences))
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account):
b.trackedFutures.track(b.handleAccount(peer, account))
self.trackedFutures.track(self.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment):
b.trackedFutures.track(b.handlePayment(peer, payment))
self.trackedFutures.track(self.handlePayment(peer, payment))
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
##
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)
if peer in self.peers:
return self.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec)
return await self.switch.dial(peer, Codec)
except CancelledError as error:
raise error
except CatchableError as exc:
trace "Unable to connect to blockexc peer", exc = exc.msg
if not isNil(b.getConn):
getConn = b.getConn
if not isNil(self.getConn):
getConn = self.getConn
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg)
await self.rpcHandler(p, msg)
# create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
debug "Created new blockexc peer", peer
b.peers[peer] = blockExcPeer
self.peers[peer] = blockExcPeer
return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
## Perform initial setup, such as want
## list exchange
##
discard b.getOrCreatePeer(peer)
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer
##
if b.isSelf(peer.peerId):
if self.isSelf(peer.peerId):
trace "Skipping dialing self", peer = peer.peerId
return
if peer.peerId in b.peers:
if peer.peerId in self.peers:
trace "Already connected to peer", peer = peer.peerId
return
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
proc dropPeer*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
trace "Dropping peer", peer
try:
if not self.switch.isNil:
await self.switch.disconnect(peer)
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
discard self.getOrCreatePeer(peer)
if not self.handlers.onPeerJoined.isNil:
await self.handlers.onPeerJoined(peer)
proc handlePeerDeparted*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Cleanup disconnected peer
##
trace "Dropping peer", peer
b.peers.del(peer)
trace "Cleaning up departed peer", peer
self.peers.del(peer)
if not self.handlers.onPeerDeparted.isNil:
await self.handlers.onPeerDeparted(peer)
method init*(self: BlockExcNetwork) =
method init*(self: BlockExcNetwork) {.raises: [].} =
## Perform protocol initialization
##
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
): Future[void] {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId)
await self.handlePeerJoined(peerId)
elif event.kind == PeerEventKind.Left:
await self.handlePeerDeparted(peerId)
else:
self.dropPeer(peerId)
warn "Unknown peer event", event
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -24,10 +24,9 @@ logScope:
const DefaultYieldInterval = 50.millis
type
ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
NetworkPeer* = ref object of RootObj
id*: PeerId
@ -65,7 +64,9 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = self.id, connId = conn.oid
warn "Detaching read loop", peer = self.id, connId = conn.oid
if self.sendConn == conn:
self.sendConn = nil
await conn.close()
proc connect*(
@ -89,7 +90,12 @@ proc send*(
return
trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg))
try:
await conn.writeLp(protobufEncode(msg))
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
func new*(
T: type NetworkPeer,

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -25,28 +25,77 @@ import ../../logutils
export payments, nitro
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # max backoff factor; 36 x MinRefreshInterval = 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
if staleness and self.refreshInProgress:
trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
staleness
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
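Taken together, refreshReplied doubles the backoff factor after each uneventful reply (capped at MaxRefreshBackoff), while any change to wants or haves snaps it back to 1. With MinRefreshInterval = 1.seconds, the refresh spacing for an idle peer grows as in this toy sequence:

var backoff = 1
var spacings: seq[int]
for _ in 0 ..< 7:
  backoff = min(backoff * 2, 36) # MaxRefreshBackoff
  spacings.add(backoff) # seconds until the next refresh
assert spacings == @[2, 4, 8, 16, 32, 36, 36]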
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly and inefficient, but since those will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
price += precense[].price
price
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block to the set of blocks that have been requested to this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested to this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)
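Because blockReceived bumps lastExchange, the timer above measures continuous silence rather than a per-block deadline. A hypothetical demo of that behaviour (the proc name and timing values are ours):

import pkg/chronos

proc demoActivityTimer(ctx: BlockExcPeerCtx, address: BlockAddress) {.async.} =
  ctx.activityTimeout = 200.milliseconds # illustrative value
  ctx.lastExchange = Moment.now()
  let timer = ctx.activityTimer() # would fire after 200ms of total silence
  await sleepAsync(100.milliseconds)
  discard ctx.blockReceived(address) # any received block resets the idle clock
  await timer # completes ~200ms after the reset, not 200ms after the start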

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -62,21 +62,23 @@ func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if peer.peerHave.anyIt(it == address):
if address in peer:
res.with.add(peer)
else:
res.without.add(peer)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,7 +9,6 @@
import std/hashes
import std/sequtils
import pkg/stew/endians2
import message
@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash =
hash(e.address)

View File

@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes
# Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
@ -25,11 +25,15 @@ type
WantListEntry* = object
address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process requests in that same order when sending blocks back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries

View File

@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes.
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,16 +9,14 @@
import std/tables
import std/sugar
import std/hashes
export tables
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/stew/[byteutils, endians2]
import pkg/questionable
import pkg/questionable/results
@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
else:
"cid: " & $a.cid
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# TODO: This is super inefficient and needs a rewrite, but it'll do for now
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/questionable
import pkg/questionable/results
@ -31,7 +28,7 @@ type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError])
async: (raises: [ChunkerError, CancelledError])
.}
# Reader that splits input data into fixed-size chunks
@ -77,7 +74,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:
@ -105,7 +102,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:

View File

@ -1,6 +1,7 @@
{.push raises: [].}
import pkg/chronos
import pkg/stew/endians2
import pkg/upraises
import pkg/stint
type
@ -8,10 +9,12 @@ type
SecondsSince1970* = int64
Timeout* = object of CatchableError
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} =
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
raiseAssert "not implemented"
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
method waitUntil*(
clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
raiseAssert "not implemented"
method start*(clock: Clock) {.base, async.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,6 +12,7 @@ import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net
import pkg/chronos
import pkg/taskpools
@ -21,7 +22,6 @@ import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
@ -56,10 +56,21 @@ type
codexNode: CodexNodeRef
repoStore: RepoStore
maintenance: BlockMaintainer
taskpool: Taskpool
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
func config*(self: CodexServer): CodexConf =
return self.config
func node*(self: CodexServer): CodexNodeRef =
return self.codexNode
func repoStore*(self: CodexServer): RepoStore =
return self.repoStore
proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1
trace "Checking sync state of Ethereum provider..."
@ -83,7 +94,9 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure
let provider = JsonRpcProvider.new(config.ethProvider)
let provider = JsonRpcProvider.new(
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
@ -103,7 +116,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
quit QuitFailure
signer = wallet
let deploy = Deployment.new(provider, config)
let deploy = Deployment.new(provider, config.marketplaceAddress)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure
@ -125,7 +138,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
when storage_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
@ -156,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config
if s.isStarted:
warn "Storage server already started, skipping"
return
trace "Starting Storage node", config = $s.config
await s.repoStore.start()
s.maintenance.start()
await s.codexNode.switch.start()
@ -172,24 +189,55 @@ proc start*(s: CodexServer) {.async.} =
await s.bootstrapInteractions()
await s.codexNode.start()
s.restServer.start()
if s.restServer != nil:
s.restServer.start()
s.isStarted = true
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"
if not s.isStarted:
warn "Storage is not started"
return
let res = await noCancel allFinishedFailed[void](
notice "Stopping Storage node"
var futures =
@[
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
)
if s.restServer != nil:
futures.add(s.restServer.stop())
let res = await noCancel allFinishedFailed[void](futures)
if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"
error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop Storage node"
proc close*(s: CodexServer) {.async.} =
var futures = @[s.codexNode.close(), s.repoStore.close()]
let res = await noCancel allFinishedFailed[void](futures)
if not s.taskpool.isNil:
try:
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
proc shutdown*(server: CodexServer) {.async.} =
await server.stop()
await server.close()
proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
@ -205,7 +253,7 @@ proc new*(
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr})
.withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
.build()
var
@ -289,7 +337,7 @@ proc new*(
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
@ -314,10 +362,13 @@ proc new*(
taskPool = taskpool,
)
var restServer: RestServerRef = nil
if config.apiBindAddress.isSome:
restServer = RestServerRef
.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort),
initTAddress(config.apiBindAddress.get(), config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high,
)
@ -331,4 +382,5 @@ proc new*(
restServer: restServer,
repoStore: repoStore,
maintenance: maintenance,
taskpool: taskpool,
)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
{.pop.}
import std/options
import std/parseutils
import std/strutils
import std/typetraits
import std/net
import pkg/chronos
import pkg/chronicles/helpers
@ -27,13 +29,12 @@ import pkg/confutils/std/net
import pkg/toml_serialization
import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64
import ./codextypes
import ./discovery
@ -44,15 +45,16 @@ import ./utils
import ./nat
import ./utils/natutils
from ./contracts/config import DefaultRequestCacheSize
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
type ThreadCount* = distinct Natural
@ -61,21 +63,19 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string =
let dataDir =
when defined(windows):
"AppData" / "Roaming" / "Codex"
"AppData" / "Roaming" / "Storage"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
"Library" / "Application Support" / "Storage"
else:
".cache" / "codex"
".cache" / "storage"
getHomeDir() / dataDir
const
codex_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false
storage_enable_api_debug_peers* {.booldefine.} = false
storage_enable_proof_failures* {.booldefine.} = false
storage_enable_log_counter* {.booldefine.} = false
DefaultDataDir* = defaultDataDir()
DefaultCircuitDir* = defaultDataDir() / "circuits"
DefaultThreadCount* = ThreadCount(0)
type
@ -137,9 +137,9 @@ type
.}: Port
dataDir* {.
desc: "The directory where codex will store configuration and data",
defaultValue: DefaultDataDir,
defaultValueDesc: $DefaultDataDir,
desc: "The directory where Storage will store configuration and data",
defaultValue: defaultDataDir(),
defaultValueDesc: "",
abbr: "d",
name: "data-dir"
.}: OutDir
@ -198,14 +198,16 @@ type
.}: ThreadCount
agentString* {.
defaultValue: "Codex",
defaultValue: "Logos Storage",
desc: "Node agent string which is used as identifier in network",
name: "agent-string"
.}: string
apiBindAddress* {.
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
.}: string
desc: "The REST API bind address",
defaultValue: "127.0.0.1".some,
name: "api-bindaddr"
.}: Option[string]
apiPort* {.
desc: "The REST Api port",
@ -263,6 +265,13 @@ type
name: "block-mn"
.}: int
blockRetries* {.
desc: "Number of times to retry fetching a block before giving up",
defaultValue: DefaultBlockRetries,
defaultValueDesc: $DefaultBlockRetries,
name: "block-retries"
.}: int
cacheSize* {.
desc:
"The size of the block cache, 0 disables the cache - " &
@ -370,34 +379,43 @@ type
hidden
.}: uint16
maxPriorityFeePerGas* {.
desc:
"Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
defaultValue: DefaultMaxPriorityFeePerGas,
defaultValueDesc: $DefaultMaxPriorityFeePerGas,
name: "max-priority-fee-per-gas",
hidden
.}: uint64
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Codex will store proof circuit data",
defaultValue: DefaultCircuitDir,
defaultValueDesc: $DefaultCircuitDir,
desc: "Directory where Storage will store proof circuit data",
defaultValue: defaultDataDir() / "circuits",
defaultValueDesc: "data/circuits",
abbr: "cd",
name: "circuit-dir"
.}: OutDir
circomR1cs* {.
desc: "The r1cs file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.r1cs",
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs",
defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
defaultValueDesc: "data/circuits/proof_main.r1cs",
name: "circom-r1cs"
.}: InputFile
circomWasm* {.
desc: "The wasm file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.wasm",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm",
defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
defaultValueDesc: "data/circuits/proof_main.wasm",
name: "circom-wasm"
.}: InputFile
circomZkey* {.
desc: "The zkey file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.zkey",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey",
defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
defaultValueDesc: "data/circuits/proof_main.zkey",
name: "circom-zkey"
.}: InputFile
@ -467,7 +485,7 @@ func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string =
let tag = strip(staticExec("git tag"))
let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace:
return "untagged build"
return tag
@ -478,7 +496,8 @@ proc getCodexRevision(): string =
return res
proc getCodexContractsRevision(): string =
let res = strip(staticExec("git rev-parse --short HEAD:vendor/codex-contracts-eth"))
let res =
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
return res
proc getNimBanner(): string =
@ -491,67 +510,85 @@ const
nimBanner* = getNimBanner()
codexFullVersion* =
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
"Codex contracts revision: " & codexContractsRevision & "\p" & nimBanner
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
proc parseCmdArg*(
T: typedesc[MultiAddress], input: string
): MultiAddress {.upraises: [ValueError].} =
): MultiAddress {.raises: [ValueError].} =
var ma: MultiAddress
try:
let res = MultiAddress.init(input)
if res.isOk:
ma = res.get()
else:
warn "Invalid MultiAddress", input = input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
except LPError as exc:
warn "Invalid MultiAddress uri", uri = input, error = exc.msg
fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
quit QuitFailure
ma
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} =
let count = parseInt(input)
if count != 0 and count < 2:
warn "Invalid number of threads", input = input
quit QuitFailure
ThreadCount(count)
proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
try:
let count = parseInt(p)
if count != 0 and count < 2:
return err("Invalid number of threads: " & p)
return ok(ThreadCount(count))
except ValueError as e:
return err("Invalid number of threads: " & p & ", error=" & e.msg)
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
proc parseCmdArg*(T: type ThreadCount, input: string): T =
let val = ThreadCount.parse(input)
if val.isErr:
fatal "Cannot parse the thread count.", input = input, error = val.error()
quit QuitFailure
return val.get()
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
var res: SignedPeerRecord
try:
if not res.fromURI(uri):
warn "Invalid SignedPeerRecord uri", uri = uri
quit QuitFailure
except LPError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
res
if not res.fromURI(p):
return err("The uri is not a valid SignedPeerRecord: " & p)
return ok(res)
except LPError, Base64Error:
let e = getCurrentException()
return err(e.msg)
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
let res = SignedPeerRecord.parse(uri)
if res.isErr:
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
quit QuitFailure
return res.get()
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
case p.toLowerAscii
of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
else:
if p.startsWith("extip:"):
try:
let ip = parseIpAddress(p[6 ..^ 1])
NatConfig(hasExtIp: true, extIp: ip)
return ok(NatConfig(hasExtIp: true, extIp: ip))
except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1]
raise newException(ValueError, error)
return err(error)
else:
let error = "Not a valid NAT option: " & p
raise newException(ValueError, error)
return err("Not a valid NAT option: " & p)
proc parseCmdArg*(T: type NatConfig, p: string): T =
let res = NatConfig.parse(p)
if res.isErr:
fatal "Cannot parse the NAT config.", error = res.error(), input = p
quit QuitFailure
return res.get()
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[]
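The hunks above consistently split each CLI parser into a pure `parse` that returns a `Result` and a thin `parseCmdArg` that logs `fatal` and quits on failure. A minimal sketch of that pattern for a hypothetical `Sample` type (the type and names are illustrative, not from the diff):

```nim
import std/strutils
import results # assumed: the same Result/ok/err API used by the procs above

type Sample = distinct int

proc parse(T: type Sample, p: string): Result[Sample, string] =
  ## pure and testable: no logging, no process exit
  try:
    ok(Sample(parseInt(p)))
  except ValueError as e:
    err("Invalid sample: " & p & ", error=" & e.msg)

proc parseCmdArg(T: type Sample, input: string): Sample =
  ## CLI boundary: report the error and quit, mirroring the diff's parsers
  let res = Sample.parse(input)
  if res.isErr:
    echo "Cannot parse the sample: ", res.error()
    quit QuitFailure
  res.get()
```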
@ -559,25 +596,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
proc parseCmdArg*(T: type EthAddress, address: string): T =
EthAddress.init($address).get()
proc parseCmdArg*(T: type NBytes, val: string): T =
func parse*(T: type NBytes, p: string): Result[NBytes, string] =
var num = 0'i64
let count = parseSize(val, num, alwaysBin = true)
let count = parseSize(p, num, alwaysBin = true)
if count == 0:
warn "Invalid number of bytes", nbytes = val
return err("Invalid number of bytes: " & p)
return ok(NBytes(num))
proc parseCmdArg*(T: type NBytes, val: string): T =
let res = NBytes.parse(val)
if res.isErr:
fatal "Cannot parse NBytes.", error = res.error(), input = val
quit QuitFailure
NBytes(num)
return res.get()
proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration
let count = parseDuration(val, dur)
if count == 0:
warn "Cannot parse duration", dur = dur
fatal "Cannot parse duration", dur = dur
quit QuitFailure
dur
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@ -588,7 +631,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
try:
val = SignedPeerRecord.parseCmdArg(uri)
except LPError as err:
warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
quit QuitFailure
proc readValue*(r: var TomlReader, val: var MultiAddress) =
@ -600,12 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk:
val = res.get()
else:
warn "Invalid MultiAddress", input = input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
proc readValue*(
r: var TomlReader, val: var NBytes
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var value = 0'i64
var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true)
@ -616,7 +659,7 @@ proc readValue*(
proc readValue*(
r: var TomlReader, val: var ThreadCount
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
try:
val = parseCmdArg(ThreadCount, str)
@ -625,7 +668,7 @@ proc readValue*(
proc readValue*(
r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
var dur: Duration
let count = parseDuration(str, dur)
@ -692,7 +735,7 @@ proc stripAnsi*(v: string): string =
res
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
# Updates log levels (without clearing old ones)
let directives = logLevel.split(";")
try:
@ -761,7 +804,7 @@ proc setupLogging*(conf: CodexConf) =
of LogKind.None:
noOutput
when codex_enable_log_counter:
when storage_enable_log_counter:
var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter)
@ -772,15 +815,6 @@ proc setupLogging*(conf: CodexConf) =
else:
defaultChroniclesStream.outputs[0].writer = writer
try:
updateLogLevel(conf.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
proc setupMetrics*(config: CodexConf) =
if config.metricsEnabled:
let metricsAddress = config.metricsAddress

View File

@ -0,0 +1,8 @@
const ContentIdsExts = [
multiCodec("codex-root"),
multiCodec("codex-manifest"),
multiCodec("codex-block"),
multiCodec("codex-slot-root"),
multiCodec("codex-proving-root"),
multiCodec("codex-slot-cell"),
]
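A hedged sketch of how such a table might be consulted; `isCodexContentCodec` is hypothetical and not part of the diff, and it assumes these codecs were already registered through the multicodec extension mechanism this file feeds into:

```nim
import pkg/libp2p/multicodec # provides MultiCodec and multiCodec()

# hypothetical helper over the ContentIdsExts table above
func isCodexContentCodec(mc: MultiCodec): bool =
  mc in ContentIdsExts
```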

View File

@ -1,13 +1,13 @@
Codex Contracts in Nim
Logos Storage Contracts in Nim
=======================
Nim API for the [Codex smart contracts][1].
Nim API for the [Logos Storage smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
storage contract, see [Logos Storage Contracts][1].
Smart contract
--------------
@ -144,5 +144,5 @@ await storage
.markProofAsMissing(id, period)
```
[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md

View File

@ -1,3 +1,5 @@
{.push raises: [].}
import std/times
import pkg/ethers
import pkg/questionable
@ -72,7 +74,9 @@ method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
method waitUntil*(
clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

View File

@ -5,6 +5,7 @@ import pkg/questionable/results
export contractabi
const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64
type
MarketplaceConfig* = object

View File

@ -9,7 +9,7 @@ import ./marketplace
type Deployment* = ref object
provider: Provider
config: CodexConf
marketplaceAddressOverride: ?Address
const knownAddresses = {
# Hardhat localhost network
@ -18,9 +18,12 @@ const knownAddresses = {
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - May 30 2025 07:33:06 AM (+00:00 UTC)
# Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0x7c7a749DE7156305E55775e7Ab3931abd6f7300E")}.toTable,
{"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
# Linea (Status)
"1660990954":
{"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
@ -32,12 +35,16 @@ proc getKnownAddress(T: type, chainId: UInt256): ?Address =
return knownAddresses[id].getOrDefault($T, Address.none)
proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
Deployment(provider: provider, config: config)
proc new*(
_: type Deployment,
provider: Provider,
marketplaceAddressOverride: ?Address = none Address,
): Deployment =
Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
when contract is Marketplace:
if address =? deployment.config.marketplaceAddress:
if address =? deployment.marketplaceAddressOverride:
return some address
let chainId = await deployment.provider.getChainId()

View File

@ -1,7 +1,6 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
@ -279,9 +278,10 @@ method fillSlot(
# happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
let gasLimit = (gas * 110) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling fillSlot on contract"
trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1)
@ -303,12 +303,15 @@ method freeSlot*(
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
# Add 10% to gas estimate to deal with different evm code flow when we
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient
)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(
slotId,
@ -320,10 +323,13 @@ method freeSlot*(
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
# Add 10% to gas estimate to deal with different evm code flow when we
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(slotId, overrides)
@ -366,27 +372,34 @@ method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
discard await market.contract.submitProof(id, proof).confirm(1)
try:
discard await market.contract.submitProof(id, proof).confirm(1)
except Proofs_InvalidProof as parent:
raise newException(
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
)
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"):
# Add 10% to gas estimate to deal with different evm code flow when we
# Add 50% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
let gasLimit = (gas * 150) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling markProofAsMissing on contract",
estimatedGas = gas, gasLimit = gasLimit
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
method canProofBeMarkedAsMissing*(
method canMarkProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async.} =
let provider = market.contract.provider
let contractWithoutSigner = market.contract.connect(provider)
let overrides = CallOverrides(blockTag: some BlockTag.pending)
): Future[bool] {.async: (raises: [CancelledError]).} =
try:
discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
let overrides = CallOverrides(blockTag: some BlockTag.pending)
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
return true
except EthersError as e:
trace "Proof cannot be marked as missing", msg = e.msg
@ -397,10 +410,13 @@ method reserveSlot*(
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"):
try:
# Add 10% to gas estimate to deal with different evm code flow when we
# Add 25% to gas estimate to deal with different evm code flow when we
# happen to be the last one that is allowed to reserve the slot
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
let gasLimit = (gas * 125) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
@ -419,7 +435,7 @@ method canReserveSlot*(
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
@ -433,7 +449,7 @@ method subscribeRequests*(
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
@ -460,7 +476,7 @@ method subscribeSlotFilled*(
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
@ -474,7 +490,7 @@ method subscribeSlotFreed*(
method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
@ -489,7 +505,7 @@ method subscribeSlotReservationsFull*(
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@ -503,7 +519,7 @@ method subscribeFulfillment(
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@ -518,7 +534,7 @@ method subscribeFulfillment(
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@ -532,7 +548,7 @@ method subscribeRequestCancelled*(
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@ -547,7 +563,7 @@ method subscribeRequestCancelled*(
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -561,7 +577,7 @@ method subscribeRequestFailed*(
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -576,7 +592,7 @@ method subscribeRequestFailed*(
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return
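The gas-limit hunks above replace the previous uniform +10% headroom with per-call multipliers: fillSlot keeps +10%, reserveSlot gets +25%, markProofAsMissing +50%, and freeSlot uses triple the estimate (+200%). A self-contained sketch of the integer arithmetic (`withHeadroomPercent` is an illustrative helper, not a function from the diff):

```nim
func withHeadroomPercent(gas, percent: uint64): uint64 =
  ## e.g. percent = 125 keeps 25% headroom: (gas * 125) div 100
  (gas * percent) div 100

assert withHeadroomPercent(100_000, 110) == 110_000 # fillSlot: +10%
assert withHeadroomPercent(100_000, 125) == 125_000 # reserveSlot: +25%
assert withHeadroomPercent(100_000, 150) == 150_000 # markProofAsMissing: +50%
assert 100_000'u64 * 3 == 300_000'u64               # freeSlot: estimate * 3
```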

View File

@ -178,6 +178,17 @@ proc markProofAsMissing*(
]
.}
proc canMarkProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
Proofs_ProofAlreadyMarkedMissing,
]
.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}

View File

@ -2,7 +2,7 @@ import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,13 +10,13 @@
{.push raises: [].}
import std/algorithm
import std/net
import std/sequtils
import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable
import pkg/questionable/results
import pkg/stew/shims/net
import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256
@ -43,6 +43,7 @@ type Discovery* = ref object of RootObj
# record to advertise node connection information; this carries any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
isStarted: bool
proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id
@ -157,7 +158,7 @@ method provide*(
method removeProvider*(
d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
): Future[void] {.base, async: (raises: [CancelledError]).} =
## Remove provider from providers table
##
@ -203,10 +204,15 @@ proc start*(d: Discovery) {.async: (raises: []).} =
try:
d.protocol.open()
await d.protocol.start()
d.isStarted = true
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async: (raises: []).} =
if not d.isStarted:
warn "Discovery not started, skipping stop"
return
try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import ../stores

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/[sugar, atomics, sequtils]
@ -25,6 +22,7 @@ import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../clock
import ../blocktype as bt
import ../utils
import ../utils/asynciter
@ -120,19 +118,22 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
(idx - step) div steps
proc getPendingBlocks(
self: Erasure, manifest: Manifest, indicies: seq[int]
self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]
var
proc attachIndex(
fut: Future[?!bt.Block], i: int
): Future[(?!bt.Block, int)] {.async.} =
## avoids closure capture issues
return (await fut, i)
for blockIndex in indices:
# request blocks from the store
pendingBlocks = indicies.map(
(i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
(r: ?!bt.Block) => (r, i)
) # Get the data blocks (first K)
)
let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
pendingBlocks.add(attachIndex(fut, blockIndex))
proc isFinished(): bool =
pendingBlocks.len == 0
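`attachIndex` routes the loop index through a proc parameter so each future snapshots its own copy; capturing the loop variable directly in a closure is a known Nim pitfall. A standalone illustration of the same fix using the stdlib's `sugar.capture` (the diff uses the proc-parameter form instead):

```nim
import std/sugar

proc makeClosures(): seq[proc(): int] =
  for i in 0 ..< 3:
    # without a snapshot every closure would observe the shared loop
    # variable; `capture` copies i per iteration, like attachIndex's
    # parameter does for blockIndex above
    capture i:
      result.add(proc(): int = i)
```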
@ -168,16 +169,16 @@ proc prepareEncodingData(
strategy = params.strategy.init(
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indicies = toSeq(strategy.getIndicies(step))
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter =
self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue
warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
return failure(err)
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
@ -185,7 +186,7 @@ proc prepareEncodingData(
resolved.inc()
for idx in indicies.filterIt(it >= manifest.blocksCount):
for idx in indices.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
@ -218,8 +219,8 @@ proc prepareDecodingData(
strategy = encoded.protectedStrategy.init(
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indices)
var
dataPieces = 0
@ -233,7 +234,7 @@ proc prepareDecodingData(
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let pos = indexToPos(encoded.steps, idx, step)
@ -352,7 +353,7 @@ proc asyncEncode*(
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding failed")
return failure("Leopard encoding task failed")
success()
@ -382,6 +383,8 @@ proc encodeData(
var
data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int)
defer:
freeDoubleArray(parity, params.ecM)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
@ -406,8 +409,6 @@ proc encodeData(
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(parity, params.ecM)
var idx = params.rounded + step
for j in 0 ..< params.ecM:
@ -419,8 +420,8 @@ proc encodeData(
trace "Adding parity block", cid = blk.cid, idx
cids[idx] = blk.cid
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
idx.inc(params.steps)
@ -544,17 +545,13 @@ proc asyncDecode*(
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding failed")
return failure("Leopard decoding task failed")
success()
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
proc decodeInternal(
self: Erasure, encoded: Manifest
): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
@ -578,6 +575,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
defer:
freeDoubleArray(recovered, encoded.ecK)
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
@ -604,8 +603,6 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(recovered, encoded.ecK)
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
@ -619,10 +616,12 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return failure(error)
trace "Recovered block", cid = blk.cid, index = i
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
@ -634,6 +633,19 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
finally:
decoder.release()
return (cids, recoveredIndices).success
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
@ -655,6 +667,44 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return decoded.success
proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
## Repair a protected manifest by reconstructing the full dataset
##
## `encoded` - the encoded (protected) manifest to
## be repaired
##
without (cids, _) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
without repaired =? (
await self.encode(
Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
)
), err:
return failure(err)
if repaired.treeCid != encoded.treeCid:
return failure(
"Original tree root differs from the repaired tree root encoded out of recovered data"
)
return success()
proc start*(self: Erasure) {.async.} =
return

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -24,13 +24,17 @@ type
IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError
IndexingWrongGroupCountError* = object of IndexingError
IndexingWrongPadBlockCountError* = object of IndexingError
IndexingStrategy* = object
strategyType*: StrategyType
strategyType*: StrategyType # Indexing strategy algorithm
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
step*: int
iterations*: int # Number of iteration steps (0 ..< iterations)
step*: int # Step size between generated indices
groupCount*: int # Number of groups to partition indices into
padBlockCount*: int # Number of padding blocks to append per group
func checkIteration(
self: IndexingStrategy, iteration: int
@ -44,39 +48,47 @@ func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}:
Iter[int].new(first, last, step)
func getLinearIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
let
first = self.firstIndex + iteration * self.step
last = min(first + self.step - 1, self.lastIndex)
getIter(first, last, 1)
func getSteppedIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
let
first = self.firstIndex + iteration
last = self.lastIndex
getIter(first, last, self.iterations)
func getIndicies*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
case self.strategyType
of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration)
self.getLinearIndices(iteration)
of StrategyType.SteppedStrategy:
self.getSteppedIndicies(iteration)
self.getSteppedIndices(iteration)
func getIndices*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
{.cast(noSideEffect).}:
Iter[int].new(
iterator (): int {.gcsafe.} =
for value in self.getStrategyIndices(iteration):
yield value
for i in 0 ..< self.padBlockCount:
yield self.lastIndex + (iteration + 1) + i * self.groupCount
)
func init*(
strategy: StrategyType, firstIndex, lastIndex, iterations: int
strategy: StrategyType,
firstIndex, lastIndex, iterations: int,
groupCount = 0,
padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex:
raise newException(
@ -91,10 +103,24 @@ func init*(
"iterations (" & $iterations & ") must be greater than zero.",
)
if padBlockCount < 0:
raise newException(
IndexingWrongPadBlockCountError,
"padBlockCount (" & $padBlockCount & ") must be equal or greater than zero.",
)
if padBlockCount > 0 and groupCount <= 0:
raise newException(
IndexingWrongGroupCountError,
"groupCount (" & $groupCount & ") must be greater than zero.",
)
IndexingStrategy(
strategyType: strategy,
firstIndex: firstIndex,
lastIndex: lastIndex,
iterations: iterations,
step: divUp((lastIndex - firstIndex + 1), iterations),
groupCount: groupCount,
padBlockCount: padBlockCount,
)
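A worked example of the two strategies, as a self-contained sketch with assumed parameters: for firstIndex=0, lastIndex=9, iterations=5 the step is divUp(10, 5) = 2, and with groupCount=5, padBlockCount=1, iteration 1 additionally yields the padding index 9 + (1 + 1) + 0*5 = 11:

```nim
func divUp(a, b: int): int =
  (a + b - 1) div b

func linearIndices(first, last, iterations, iteration: int): seq[int] =
  # contiguous chunk per iteration, mirroring getLinearIndices above
  let step = divUp(last - first + 1, iterations)
  let lo = first + iteration * step
  for i in lo .. min(lo + step - 1, last):
    result.add i

func steppedIndices(first, last, iterations, iteration: int): seq[int] =
  # strided walk per iteration, mirroring getSteppedIndices above
  var i = first + iteration
  while i <= last:
    result.add i
    i += iterations

assert linearIndices(0, 9, 5, 1) == @[2, 3]
assert steppedIndices(0, 9, 5, 1) == @[1, 6]
```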

View File

@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
##
## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,11 +9,9 @@
# This module implements serialization and deserialization of Manifest
import pkg/upraises
import times
push:
{.upraises: [].}
{.push raises: [].}
import std/tables
import std/sequtils

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# This module defines all operations on Manifest
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]

View File

@ -1,5 +1,4 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
@ -20,17 +19,18 @@ type
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
@ -204,9 +204,9 @@ method markProofAsMissing*(
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canProofBeMarkedAsMissing*(
method canMarkProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async.} =
): Future[bool] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method reserveSlot*(
@ -274,7 +274,7 @@ method subscribeProofSubmission*(
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
method unsubscribe*(subscription: Subscription) {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p
import pkg/questionable

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -47,28 +47,6 @@ type
CodexProof* = ref object of ByteProof
mcodec*: MultiCodec
# CodeHashes is not exported from libp2p
# So we need to recreate it instead of
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Number of leaves
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var proof = CodexProof(mcodec: self.mcodec)
@ -128,17 +106,12 @@ proc `$`*(self: CodexProof): string =
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
## Compress two hashes
##
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/codex-storage/nim-codex/issues/1162
let input = @x & @y & @[key.byte]
var digest = hashes.sha256.hash(input)
success @digest
let digest = ?MultiHash.digest(codec, input).mapFailure
success digest.digestBytes
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
@ -147,12 +120,12 @@ func init*(
return failure "Empty leaves"
let
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
compress(x, y, key, mcodec)
digestSize = ?mcodec.digestSize.mapFailure
Zero: ByteHash = newSeq[byte](digestSize)
if mhash.size != leaves[0].len:
if digestSize != leaves[0].len:
return failure "Invalid hash length"
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
@ -190,12 +163,12 @@ proc fromNodes*(
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
if mhash.size != nodes[0].len:
if digestSize != nodes[0].len:
return failure "Invalid hash length"
var
@ -228,10 +201,10 @@ func init*(
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
success CodexProof(
compress: compressor,

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

codex/multicodec_exts.nim Normal file
View File

@ -0,0 +1,11 @@
const CodecExts = [
("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress]
("codex-manifest", 0xCD01),
("codex-block", 0xCD02),
("codex-root", 0xCD03),
("codex-slot-root", 0xCD04),
("codex-proving-root", 0xCD05),
("codex-slot-cell", 0xCD06),
]

codex/multihash_exts.nim Normal file
View File

@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2
proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/logos-storage/logos-storage-nim/issues/1162
if len(output) > 0:
let digest = hashes.sha256.hash(data)
copyMem(addr output[0], addr digest[0], 32)
proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.Sponge.digest(data).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
const Sha2256MultiHash* = MHash(
mcodec: multiCodec("sha2-256"),
size: sha256.sizeDigest,
coder: sha2_256hash_constantine,
)
const HashExts = [
# override sha2-256 hash function
Sha2256MultiHash,
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
size: 32,
coder: poseidon2_sponge_rate2,
),
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
size: 32,
coder: poseidon2_merkle_2kb_sponge,
),
]
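Each entry supplies a coder with the `(data, output)` shape used above. A toy coder with the same signature, purely for illustration (not a real hash and not part of the diff):

```nim
proc identityCoder(data: openArray[byte], output: var openArray[byte]) =
  ## copies input bytes into output, same calling convention as the
  ## sha2/poseidon2 coders above
  let n = min(data.len, output.len)
  if n > 0:
    copyMem(addr output[0], unsafeAddr data[0], n)
```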

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -10,10 +10,10 @@
import
std/[options, os, strutils, times, net, atomics],
stew/shims/net as stewNet,
stew/[objects, results],
stew/[objects],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net
json_serialization/std/net,
results
import pkg/chronos
import pkg/chronicles

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -44,7 +44,7 @@ import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
import ./utils/asynciter
import ./utils/safeasynciter
import ./utils/trackedfutures
export logutils
@ -52,7 +52,10 @@ export logutils
logScope:
topics = "codex node"
const DefaultFetchBatch = 10
const
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type
Contracts* =
@ -78,9 +81,9 @@ type
CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
BatchProc* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: CodexNodeRef): Switch =
return self.switch
@ -186,34 +189,62 @@ proc fetchBatched*(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
while not iter.finished:
let blockFutures = collect:
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
# Sliding window: maintain batchSize blocks in-flight
let
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
refillSize = max(refillThreshold, 1)
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
if blockFutures.len == 0:
var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
var blockResults = await self.networkStore.getBlocks(addresses)
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue
without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err:
trace "Some blocks failed to fetch", err = err.msg
return failure(err)
inc(successfulBlocks)
inc(completedInWindow)
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value)
if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
let numOfFailedBlocks = blockResults.len - blocks.len
if numOfFailedBlocks > 0:
return
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")")
if completedInWindow >= refillThreshold and not iter.finished:
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
for i in 0 ..< refillSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption:
if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
if not iter.finished:
await sleepAsync(1.millis)
success()
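With the new constants the window arithmetic works out as follows; a minimal sketch using the values from the diff:

```nim
const
  DefaultFetchBatch = 1024
  MaxOnBatchBlocks = 128
  BatchRefillThreshold = 0.75

let
  refillThreshold = int(float(DefaultFetchBatch) * BatchRefillThreshold)
  refillSize = max(refillThreshold, 1)
  maxCallbackBlocks = min(DefaultFetchBatch, MaxOnBatchBlocks)

assert refillThreshold == 768 # refill once 75% of the window completes
assert refillSize == 768      # up to 768 new addresses chained per refill
assert maxCallbackBlocks == 128 # onBatch sees at most 128 blocks at a time
```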
proc fetchBatched*(
@ -403,6 +434,7 @@ proc store*(
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize,
onBlockStored: OnBlockStoredProc = nil,
): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest
@ -432,6 +464,9 @@ proc store*(
if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
if not onBlockStored.isNil:
onBlockStored(chunk)
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -623,6 +658,7 @@ proc requestStorage*(
proc onStore(
self: CodexNodeRef,
request: StorageRequest,
expiry: SecondsSince1970,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
@ -638,10 +674,6 @@ proc onStore(
trace "Received a request to store a slot"
# TODO: Use the isRepairing to manage the slot download.
# If isRepairing is true, the slot has to be repaired before
# being downloaded.
without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)
@ -651,8 +683,6 @@ proc onStore(
trace "Unable to create slots builder", err = err.msg
return failure(err)
let expiry = request.expiry
if slotIdx > manifest.slotRoots.high.uint64:
trace "Slot index not in manifest", slotIdx
return failure(newException(CodexError, "Slot index not in manifest"))
@ -663,7 +693,7 @@ proc onStore(
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let res = await allFinishedFailed[?!void](ensureExpiryFutures)
if res.failure.len > 0:
@ -676,32 +706,45 @@ proc onStore(
return success()
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return
without blksIter =? indexer.getIndicies(slotIdx.int).catch, err:
trace "Unable to get indicies from strategy", err = err.msg
return failure(err)
if isRepairing:
trace "start repairing slot", slotIdx
try:
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
if err =? (await erasure.repair(manifest)).errorOption:
error "Unable to erasure decode repairing manifest",
cid = manifest.treeCid, exc = err.msg
return failure(err)
except CatchableError as exc:
error "Error erasure decoding repairing manifest",
cid = manifest.treeCid, exc = exc.msg
return failure(exc.msg)
else:
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
trace "Unable to get indices from strategy", err = err.msg
return failure(err)
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
trace "Unable to build slot", err = err.msg
return failure(err)
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
@ -789,11 +832,12 @@ proc start*(self: CodexNodeRef) {.async.} =
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onStore(request, slot, onBatch, isRepairing)
self.onStore(request, expiry, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970
@ -837,14 +881,11 @@ proc start*(self: CodexNodeRef) {.async.} =
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs
notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node"
if not self.taskpool.isNil:
self.taskpool.shutdown()
await self.trackedFutures.cancelTracked()
if not self.engine.isNil:
@ -865,6 +906,7 @@ proc stop*(self: CodexNodeRef) {.async.} =
if not self.clock.isNil:
await self.clock.stop()
proc close*(self: CodexNodeRef) {.async.} =
if not self.networkStore.isNil:
await self.networkStore.close
@ -891,3 +933,10 @@ proc new*(
contracts: contracts,
trackedFutures: TrackedFutures(),
)
proc hasLocalBlock*(
self: CodexNodeRef, cid: Cid
): Future[bool] {.async: (raises: [CancelledError]).} =
## Returns true if the given Cid is present in the local store
return await (cid in self.networkStore.localStore)
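A minimal usage sketch for the new helper; checking locality first avoids kicking off a network download (cid assumed in scope):

    if await node.hasLocalBlock(cid):
      trace "CID already in local store", cid
    else:
      # fall back to the network, e.g. via fetchManifest for dataset CIDs
      without manifest =? (await node.fetchManifest(cid)), err:
        trace "Unable to fetch manifest for cid", cid, err = err.msg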

View File

@ -30,12 +30,12 @@ method run*(
requestId = purchase.requestId
proc wait() {.async.} =
let done = newFuture[void]()
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.complete()
done.fire()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done
await done.wait()
await subscription.unsubscribe()
proc withTimeout(future: Future[void]) {.async.} =
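The switch from a bare Future[void] to an AsyncEvent is the important part of this hunk: completing an already-completed future can trigger an assertion, while firing an event repeatedly is harmless, which matters when a fulfillment callback may be invoked more than once. The chronos pattern in isolation (a sketch):

    import pkg/chronos

    proc demo() {.async.} =
      let done = newAsyncEvent()
      proc callback() =
        done.fire() # safe to call repeatedly, unlike Future.complete()
      callback()
      callback()
      await done.wait()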

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sequtils
import std/mimetypes
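For readers unfamiliar with the replacement pragma: push applies raises: [] and gcsafe to every declaration that follows, until a matching pop or the end of the module, which is what the upraises shim emulated. In isolation:

    {.push raises: [], gcsafe.}
    proc cannotRaise() = discard # fails to compile if it could raise
    {.pop.}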
@ -183,7 +180,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin # prevents capture inside of api definition
router.api(MethodOptions, "/api/codex/v1/data") do(
router.api(MethodOptions, "/api/storage/v1/data") do(
resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -195,7 +192,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse:
router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner
##
@ -257,11 +254,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
finally:
await reader.closeWait()
router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse:
let json = await formatManifestBlocks(node)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodOptions, "/api/codex/v1/data/{cid}") do(
router.api(MethodOptions, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -270,7 +267,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.api(MethodGet, "/api/codex/v1/data/{cid}") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -286,7 +283,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
await node.retrieveCid(cid.get(), local = true, resp = resp)
router.api(MethodDelete, "/api/codex/v1/data/{cid}") do(
router.api(MethodDelete, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Deletes either a single block or an entire dataset
@ -307,7 +304,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do(
router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network to the local node
@ -328,7 +325,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network in a streaming
@ -347,7 +344,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download only the manifest.
@ -365,7 +362,23 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Only tests whether the given CID is available in the local store
##
var headers = buildCorsHeaders("GET", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
let cid = cid.get()
let hasCid = await node.hasLocalBlock(cid)
let json = %*{$cid: hasCid}
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
let json =
%RestRepoStore(
totalBlocks: repoStore.totalBlocks,
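In other words, GET /api/storage/v1/data/{cid}/exists answers purely from the local store and returns a one-entry JSON object keyed by the CID, e.g. {"<cid>": true}; it never triggers a network fetch.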
@ -378,7 +391,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
## Returns active slots for the host
@ -396,7 +409,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
slotId: SlotId
) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the
@ -426,7 +439,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
restAgent.toJson, contentType = "application/json", headers = headers
)
router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -448,7 +461,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is
## returned to the availability.
@ -528,7 +541,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do(
router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -537,7 +550,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do(
router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId
) -> RestApiResponse:
## Updates Availability.
@ -625,7 +638,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500)
router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
id: AvailabilityId
) -> RestApiResponse:
## Gets Availability's reservations.
@ -669,7 +682,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
cid: Cid
) -> RestApiResponse:
var headers = buildCorsHeaders("POST", allowedOrigin)
@ -779,7 +792,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
id: PurchaseId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -811,7 +824,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
@ -833,7 +846,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
## various node management APIs
##
router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse:
## Returns node SPR in requested format, json or text.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -856,7 +869,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse:
## Returns node's peerId in requested format, json or text.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -875,7 +888,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do(
router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do(
peerId: PeerId, addrs: seq[MultiAddress]
) -> RestApiResponse:
## Connect to a peer
@ -913,7 +926,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse:
## Print rudimentary node information
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -933,7 +946,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
"",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
"codex": {
"storage": {
"version": $codexVersion,
"revision": $codexRevision,
"contracts": $codexContractsRevision,
@ -948,7 +961,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do(
router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do(
level: Option[string]
) -> RestApiResponse:
## Set log level at run time
@ -974,8 +987,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
when codex_enable_api_debug_peers:
router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do(
when storage_enable_api_debug_peers:
router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do(
peerId: PeerId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/crypto/crypto
import pkg/bearssl/rand

View File

@ -22,7 +22,7 @@ import ./utils/exceptions
## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the
## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
@ -148,26 +148,12 @@ proc cleanUp(
# Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last.
if data.slotIndex <= uint16.high.uint64 and reprocessSlot and request =? data.request:
let res =
await noCancel sales.context.market.slotCollateral(data.requestId, data.slotIndex)
if res.isErr:
error "Failed to re-add item back to the slot queue: unable to calculate collateral",
error = res.error.msg
else:
let collateral = res.get()
let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(
data.requestId,
data.slotIndex.uint16,
data.ask,
request.expiry,
seen = true,
collateral = collateral,
)
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
let queue = sales.context.slotQueue
item.seen = true
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(item).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
let fut = sales.remove(agent)
sales.trackedFutures.track(fut)
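This simplification works because the agent now carries the original SlotQueueItem in its SalesData (see the salesagent and salesdata changes below), so re-queueing a seen item no longer needs a market round-trip to recompute collateral.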
@ -181,8 +167,9 @@ proc processSlot(
) {.async: (raises: [CancelledError]).} =
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
let agent =
newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest)
let agent = newSalesAgent(
sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
)
let completed = newAsyncEvent()

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -27,9 +27,7 @@
## | UInt256 | totalRemainingCollateral | |
## +---------------------------------------------------+
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sequtils
import std/sugar
@ -38,7 +36,6 @@ import std/sequtils
import std/times
import pkg/chronos
import pkg/datastore
import pkg/nimcrypto
import pkg/questionable
import pkg/questionable/results
import pkg/stint
@ -55,6 +52,8 @@ import ../units
export requests
export logutils
from nimcrypto import randomBytes
logScope:
topics = "marketplace sales reservations"
@ -92,14 +91,10 @@ type
repo: RepoStore
OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.
upraises: [], gcsafe, async: (raises: [CancelledError]), closure
.}
IterDispose* =
proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
upraises: [], gcsafe, async: (raises: [])
.}
GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.async: (raises: []).}
StorableIter* = ref object
finished*: bool
next*: GetNext
@ -351,7 +346,8 @@ proc updateAvailability(
if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalCollateral < obj.totalCollateral: # availability updated
oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
# availability updated
# inform subscribers that Availability has been modified (with increased
# size)
if OnAvailabilitySaved =? self.OnAvailabilitySaved:

View File

@ -2,7 +2,6 @@ import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/upraises
import ../contracts/requests
import ../errors
import ../logutils
@ -11,6 +10,7 @@ import ./statemachine
import ./salescontext
import ./salesdata
import ./reservations
import ./slotqueue
export reservations
@ -42,10 +42,16 @@ proc newSalesAgent*(
requestId: RequestId,
slotIndex: uint64,
request: ?StorageRequest,
slotQueueItem = SlotQueueItem.none,
): SalesAgent =
var agent = SalesAgent.new()
agent.context = context
agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request)
agent.data = SalesData(
requestId: requestId,
slotIndex: slotIndex,
request: request,
slotQueueItem: slotQueueItem,
)
return agent
proc retrieveRequest*(agent: SalesAgent) {.async.} =
@ -106,14 +112,12 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
method onFulfilled*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
) {.base, gcsafe, raises: [].} =
let cancelled = agent.data.cancelled
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
cancelled.cancelSoon()
method onFailed*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
without request =? agent.data.request:
return
if agent.data.requestId == requestId:
@ -121,7 +125,7 @@ method onFailed*(
method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, upraises: [].} =
) {.base, gcsafe, raises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex))

View File

@ -1,6 +1,5 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import pkg/libp2p/cid
import ../market
@ -24,17 +23,20 @@ type
slotQueue*: SlotQueue
simulateProofFailures*: int
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
BlocksCb* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnStore* = proc(
request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool
): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).}
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
blocksCb: BlocksCb,
isRepairing: bool,
): Future[?!void] {.async: (raises: [CancelledError]).}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, async: (raises: [CancelledError])
async: (raises: [CancelledError])
.}
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
async: (raises: [CancelledError])
.}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

View File

@ -2,6 +2,7 @@ import pkg/chronos
import ../contracts/requests
import ../market
import ./reservations
import ./slotqueue
type SalesData* = ref object
requestId*: RequestId
@ -10,3 +11,4 @@ type SalesData* = ref object
slotIndex*: uint64
cancelled*: Future[void]
reservation*: ?Reservation
slotQueueItem*: ?SlotQueueItem

View File

@ -15,8 +15,7 @@ logScope:
topics = "marketplace slotqueue"
type
OnProcessSlot* =
proc(item: SlotQueueItem): Future[void] {.gcsafe, async: (raises: []).}
OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}
# Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg
@ -30,7 +29,7 @@ type
duration: uint64
pricePerBytePerSecond: UInt256
collateral: UInt256 # Collateral computed
expiry: uint64
expiry: ?uint64
seen: bool
# don't need to -1 to prevent overflow when adding 1 (to always allow push)
@ -89,8 +88,9 @@ proc `<`*(a, b: SlotQueueItem): bool =
scoreA.addIf(a.collateral < b.collateral, 2)
scoreB.addIf(a.collateral > b.collateral, 2)
scoreA.addIf(a.expiry > b.expiry, 1)
scoreB.addIf(a.expiry < b.expiry, 1)
if expiryA =? a.expiry and expiryB =? b.expiry:
scoreA.addIf(expiryA > expiryB, 1)
scoreB.addIf(expiryA < expiryB, 1)
return scoreA > scoreB
@ -124,7 +124,7 @@ proc init*(
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: uint64,
expiry: ?uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
@ -139,6 +139,17 @@ proc init*(
seen: seen,
)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem.init(requestId, slotIndex, ask, some expiry, collateral, seen)
proc init*(
_: type SlotQueueItem,
request: StorageRequest,
@ -151,7 +162,7 @@ proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: uint64,
expiry: ?uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
if not ask.slots.inRange:
@ -167,10 +178,19 @@ proc init*(
Rng.instance.shuffle(items)
return items
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
SlotQueueItem.init(requestId, ask, some expiry, collateral)
proc init*(
_: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral)
return SlotQueueItem.init(request.id, request.ask, uint64.none, collateral)
proc inRange*(val: SomeUnsignedInt): bool =
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
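Expiry on a queue item is now optional, and the old uint64 signatures remain as thin wrappers, so existing call sites keep compiling; in the `<` comparison above, the expiry criterion only contributes to the score when both items carry one. A hedged construction sketch (identifiers assumed in scope):

    # old signature still works: the expiry is wrapped in `some`
    let a = SlotQueueItem.init(requestId, 0'u16, ask, 1234'u64, collateral)
    # new: no expiry known yet, e.g. items built straight from a StorageRequest
    let b = SlotQueueItem.init(requestId, 1'u16, ask, uint64.none, collateral)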
@ -196,6 +216,9 @@ proc collateralPerByte*(self: SlotQueueItem): UInt256 =
proc seen*(self: SlotQueueItem): bool =
self.seen
proc `seen=`*(self: var SlotQueueItem, seen: bool) =
self.seen = seen
proc running*(self: SlotQueue): bool =
self.running

View File

@ -1,5 +1,4 @@
import pkg/questionable
import pkg/upraises
import ../errors
import ../utils/asyncstatemachine
import ../market
@ -12,21 +11,21 @@ export asyncstatemachine
type
SaleState* = ref object of State
SaleError* = ref object of CodexError
SaleError* = object of CodexError
method onCancelled*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
method onFailed*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
proc cancelledEvent*(request: StorageRequest): Event =

View File

@ -38,6 +38,7 @@ method run*(
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
let reservations = context.reservations
without onStore =? context.onStore:
@ -69,11 +70,21 @@ method run*(
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
try:
let slotId = slotId(request.id, data.slotIndex)
let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair
let requestId = request.id
let slotId = slotId(requestId, data.slotIndex)
let requestState = await market.requestState(requestId)
let isRepairing = (await market.slotState(slotId)) == SlotState.Repair
trace "Retrieving expiry"
var expiry: SecondsSince1970
if state =? requestState and state == RequestState.Started:
expiry = await market.getRequestEnd(requestId)
else:
expiry = await market.requestExpiresAt(requestId)
trace "Starting download"
if err =? (await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption:
if err =?
(await onStore(request, expiry, data.slotIndex, onBlocks, isRepairing)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))
trace "Download complete"

View File

@ -1,6 +1,5 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../statemachine
import ../salesagent

View File

@ -11,7 +11,7 @@ import ./cancelled
import ./failed
import ./proving
when codex_enable_proof_failures:
when storage_enable_proof_failures:
import ./provingsimulated
logScope:
@ -59,7 +59,7 @@ method run*(
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))
when codex_enable_proof_failures:
when storage_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(

View File

@ -51,7 +51,9 @@ method run*(
await agent.subscribe()
without request =? data.request:
raiseAssert "no sale request"
error "request could not be retrieved", id = data.requestId
let error = newException(SaleError, "request could not be retrieved")
return some State(SaleErrored(error: error))
let slotId = slotId(data.requestId, data.slotIndex)
let state = await market.slotState(slotId)
@ -59,7 +61,7 @@ method run*(
return some State(SaleIgnored(reprocessSlot: false))
# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)
# due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)
logScope:
slotIndex = data.slotIndex

View File

@ -1,5 +1,5 @@
import ../../conf
when codex_enable_proof_failures:
when storage_enable_proof_failures:
import std/strutils
import pkg/stint
import pkg/ethers
@ -40,7 +40,7 @@ when codex_enable_proof_failures:
try:
warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, Groth16Proof.default)
except Proofs_InvalidProof as e:
except ProofInvalidError as e:
discard # expected
except CancelledError as error:
raise error

View File

@ -38,6 +38,11 @@ method run*(
await agent.retrieveRequest()
await agent.subscribe()
without request =? data.request:
error "request could not be retrieved", id = data.requestId
let error = newException(SaleError, "request could not be retrieved")
return some State(SaleErrored(error: error))
let slotId = slotId(data.requestId, data.slotIndex)
let slotState = await market.slotState(slotId)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -113,17 +113,17 @@ func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural =
self.numBlockCells * self.numSlotBlocks
func slotIndiciesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
## Returns the slot indices.
##
self.strategy.getIndicies(slot).catch
self.strategy.getIndices(slot).catch
func slotIndicies*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
## Returns the slot indices.
##
if iter =? self.strategy.getIndicies(slot).catch:
if iter =? self.strategy.getIndices(slot).catch:
return toSeq(iter)
func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
@ -184,7 +184,7 @@ proc getCellHashes*[T, H](
slotIndex = slotIndex
let hashes = collect(newSeq):
for i, blkIdx in self.strategy.getIndicies(slotIndex):
for i, blkIdx in self.strategy.getIndices(slotIndex):
logScope:
blkIdx = blkIdx
pos = i
@ -310,7 +310,7 @@ proc new*[T, H](
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
strategy = SteppedStrategy,
strategy = LinearStrategy,
cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] =
if not manifest.protected:
@ -354,7 +354,14 @@ proc new*[T, H](
emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)
strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch
strategy =
?strategy.init(
0,
manifest.blocksCount - 1,
manifest.numSlots,
manifest.numSlots,
numPadSlotBlocks,
).catch
logScope:
numSlotBlocks = numSlotBlocks

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -53,7 +53,7 @@ proc getSample*[T, H](
cellsPerBlock = self.builder.numBlockCells
blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index
blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index
origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx]
origBlockIdx = self.builder.slotIndices(self.index)[blkSlotIdx]
# convert to original dataset block index
logScope:

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -29,7 +29,7 @@ type
Block
Both
CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, async: (raises: []).}
CidCallback* = proc(cid: Cid): Future[void] {.async: (raises: []).}
BlockStore* = ref object of RootObj
onBlockStored*: ?CidCallback
@ -65,6 +65,19 @@ method getBlock*(
raiseAssert("getBlock by addr not implemented!")
method completeBlock*(
self: BlockStore, address: BlockAddress, blk: Block
) {.base, gcsafe.} =
discard
method getBlocks*(
self: BlockStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
## Gets a set of blocks from the blockstore. Blocks might
## be returned in any order.
raiseAssert("getBlocks not implemented!")
method getBlockAndProof*(
self: BlockStore, treeCid: Cid, index: Natural
): Future[?!(Block, CodexProof)] {.base, async: (raises: [CancelledError]), gcsafe.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -66,6 +66,21 @@ method getBlock*(
trace "Error requesting block from cache", cid, error = exc.msg
return failure exc
method getBlocks*(
self: CacheStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var i = 0
proc isFinished(): bool =
i == addresses.len
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
let value = await self.getBlock(addresses[i])
inc(i)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
method getCidAndProof*(
self: CacheStore, treeCid: Cid, index: Natural
): Future[?!(Cid, CodexProof)] {.async: (raises: [CancelledError]).} =
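A consumption sketch for the new getBlocks method, mirroring the iterator pattern already used elsewhere in this diff (e.g. the maintenance loop): iterate the futures and unwrap each result as it completes. store and addresses are assumed in scope:

    let iter = await store.getBlocks(addresses)
    for blkFut in iter:
      without blk =? (await blkFut), err:
        trace "Unable to get block", err = err.msg
        continue
      # ... use blk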
@ -259,6 +274,9 @@ method delBlock*(
return success()
method completeBlock*(self: CacheStore, address: BlockAddress, blk: Block) {.gcsafe.} =
discard
method close*(self: CacheStore): Future[void] {.async: (raises: []).} =
## Close the blockstore, a no-op for this implementation
##

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sugar
import pkg/questionable/results

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -23,6 +23,9 @@ import ../clock
import ../logutils
import ../systemclock
logScope:
topics = "codex maintenance"
const
DefaultBlockInterval* = 10.minutes
DefaultNumBlocksPerInterval* = 1000
@ -40,7 +43,7 @@ proc new*(
repoStore: RepoStore,
interval: Duration,
numberOfBlocksPerInterval = 100,
timer = Timer.new(),
timer = Timer.new("maintenance"),
clock: Clock = SystemClock.new(),
): BlockMaintainer =
## Create new BlockMaintainer instance
@ -59,8 +62,8 @@ proc new*(
proc deleteExpiredBlock(
self: BlockMaintainer, cid: Cid
): Future[void] {.async: (raises: [CancelledError]).} =
if isErr (await self.repoStore.delBlock(cid)):
trace "Unable to delete block from repoStore"
if error =? (await self.repoStore.delBlock(cid)).errorOption:
warn "Unable to delete block from repoStore", error = error.msg
proc processBlockExpiration(
self: BlockMaintainer, be: BlockExpiration
@ -78,13 +81,13 @@ proc runBlockCheck(
)
without iter =? expirations, err:
trace "Unable to obtain blockExpirations iterator from repoStore"
warn "Unable to obtain blockExpirations iterator from repoStore", err = err.msg
return
var numberReceived = 0
for beFut in iter:
without be =? (await beFut), err:
trace "Unable to obtain blockExpiration from iterator"
warn "Unable to obtain blockExpiration from iterator", err = err.msg
continue
inc numberReceived
await self.processBlockExpiration(be)
@ -94,6 +97,7 @@ proc runBlockCheck(
# We're at the end of the dataset and should start from 0 next time.
if numberReceived < self.numberOfBlocksPerInterval:
self.offset = 0
trace "Cycle completed"
proc start*(self: BlockMaintainer) =
proc onTimer(): Future[void] {.async: (raises: []).} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -31,6 +31,31 @@ type NetworkStore* = ref object of BlockStore
engine*: BlockExcEngine # blockexc decision engine
localStore*: BlockStore # local block store
method getBlocks*(
self: NetworkStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var
localAddresses: seq[BlockAddress]
remoteAddresses: seq[BlockAddress]
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for address in addresses:
if not (await address in self.localStore):
remoteAddresses.add(address)
else:
localAddresses.add(address)
if (Moment.now() - lastIdle) >= runtimeQuota:
await idleAsync()
lastIdle = Moment.now()
return chain(
await self.localStore.getBlocks(localAddresses),
self.engine.requestBlocks(remoteAddresses),
)
method getBlock*(
self: NetworkStore, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} =
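The classification loop above uses a cooperative-yield pattern so that scanning a large address list does not monopolise the event loop. The pattern in isolation (work is a stand-in):

    let runtimeQuota = 10.milliseconds
    var lastIdle = Moment.now()
    for item in items:
      work(item)
      if (Moment.now() - lastIdle) >= runtimeQuota:
        await idleAsync() # yield so other tasks can run
        lastIdle = Moment.now()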
@ -63,6 +88,9 @@ method getBlock*(
self.getBlock(BlockAddress.init(treeCid, index))
method completeBlock*(self: NetworkStore, address: BlockAddress, blk: Block) =
self.engine.completeBlock(address, blk)
method putBlock*(
self: NetworkStore, blk: Block, ttl = Duration.none
): Future[?!void] {.async: (raises: [CancelledError]).} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -38,6 +38,21 @@ logScope:
# BlockStore API
###########################################################
method getBlocks*(
self: RepoStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var i = 0
proc isFinished(): bool =
i == addresses.len
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
let value = await self.getBlock(addresses[i])
inc(i)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
method getBlock*(
self: RepoStore, cid: Cid
): Future[?!Block] {.async: (raises: [CancelledError]).} =
@ -428,7 +443,6 @@ proc start*(
): Future[void] {.async: (raises: [CancelledError, CodexError]).} =
## Start repo
##
if self.started:
trace "Repo already started"
return
@ -450,6 +464,5 @@ proc stop*(self: RepoStore): Future[void] {.async: (raises: []).} =
return
trace "Stopping repo"
await self.close()
self.started = false

Some files were not shown because too many files have changed in this diff.