diff --git a/.github/actions/nimbus-build-system/action.yml b/.github/actions/nimbus-build-system/action.yml index a5b22147..4acbff33 100644 --- a/.github/actions/nimbus-build-system/action.yml +++ b/.github/actions/nimbus-build-system/action.yml @@ -226,7 +226,7 @@ runs: run: | git config --global core.symlinks false - - name: Build Nim and Codex dependencies + - name: Build Nim and Logos Storage dependencies shell: ${{ inputs.shell }} {0} run: | which gcc diff --git a/.github/workflows/Readme.md b/.github/workflows/Readme.md index 1aff4962..f7f00e57 100644 --- a/.github/workflows/Readme.md +++ b/.github/workflows/Readme.md @@ -70,7 +70,7 @@ runners busy for longer on a workflow that you know is going to fail anyway. Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed. -[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage +[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage [composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action [reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows [cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache diff --git a/.github/workflows/ci-reusable.yml b/.github/workflows/ci-reusable.yml index d7475757..acc322b5 100644 --- a/.github/workflows/ci-reusable.yml +++ b/.github/workflows/ci-reusable.yml @@ -54,9 +54,9 @@ jobs: with: node-version: 22 - - name: Install Ethereum node dependencies + - name: Start Ethereum node with Logos Storage contracts if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all' - working-directory: vendor/codex-contracts-eth + working-directory: vendor/logos-storage-contracts-eth env: MSYS2_PATH_TYPE: inherit run: | diff --git a/.github/workflows/deploy-devnet.yml b/.github/workflows/deploy-devnet.yml deleted file mode 100644 index a95a3df0..00000000 --- a/.github/workflows/deploy-devnet.yml +++ /dev/null @@ -1,175 +0,0 @@ -name: Deploy - Devnet - - -on: - workflow_dispatch: - inputs: - codex_image: - description: codexstorage/nim-codex:latest-dist-tests - required: false - type: string - workflow_call: - inputs: - codex_image: - description: codexstorage/nim-codex:latest-dist-tests - required: true - type: string - -env: - CODEX_NAMESPACE: codex - TOOLS_NAMESPACE: common - KUBE_CONFIG: ${{ secrets.DEVNET_KUBE_CONFIG }} - KUBE_VERSION: v1.33.1 - CODEX_IMAGE: ${{ inputs.codex_image }} - SSH_HOSTS: ${{ secrets.DEVNET_SSH_HOSTS }} - SSH_PORT: ${{ secrets.DEVNET_SSH_PORT }} - SSH_USERNAME: ${{ secrets.DEVNET_SSH_USERNAME }} - SSH_PRIVATE_KEY: ${{ secrets.DEVNET_SSH_KEY }} - - -jobs: - deploy-contracts: - name: Deploy contracts - runs-on: ubuntu-latest - steps: - - name: Create access token - uses: actions/create-github-app-token@v2 - id: app-token - with: - app-id: ${{ secrets.DEPLOYER_APP_ID }} - private-key: ${{ secrets.DEPLOYER_PRIVATE_KEY }} - repositories: codex-contracts-eth - - - name: Checkout sources - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Get contracts submodule ref - id: contracts - run: echo "ref=$(git rev-parse HEAD:vendor/codex-contracts-eth)" >> $GITHUB_OUTPUT - - - name: Deploy smart contracts - uses: the-actions-org/workflow-dispatch@v4 - with: - repo: codex-storage/codex-contracts-eth - workflow: devnet-contracts.yml - token: ${{ steps.app-token.outputs.token }} - wait-for-completion-timeout: 20m - 
wait-for-completion-interval: 20s - inputs: '{ "network": "codex_devnet", "contracts_ref": "${{ steps.contracts.outputs.ref }}" }' - - - bootstrap-nodes: - name: Bootstrap nodes - runs-on: ubuntu-latest - needs: deploy-contracts - steps: - - name: Codex Bootstrap - Update - uses: appleboy/ssh-action@v1 - with: - host: ${{ secrets.DEVNET_SSH_HOSTS }} - username: ${{ secrets.DEVNET_SSH_USERNAME }} - key: ${{ secrets.DEVNET_SSH_KEY }} - port: ${{ secrets.DEVNET_SSH_PORT }} - script: /opt/codex/remote-deploy.sh ${{ env.CODEX_IMAGE }} - - cluster-nodes: - name: Cluster nodes - runs-on: ubuntu-latest - needs: bootstrap-nodes - steps: - - name: Kubectl - Install ${{ env.KUBE_VERSION }} - uses: azure/setup-kubectl@v4 - with: - version: ${{ env.KUBE_VERSION }} - - - name: Kubectl - Kubeconfig - run: | - mkdir -p "${HOME}"/.kube - echo "${{ env.KUBE_CONFIG }}" | base64 -d > "${HOME}"/.kube/config - - - name: Codex Storage - Update - run: | - for node in {1..5}; do - kubectl -n "${{ env.CODEX_NAMESPACE }}" patch statefulset codex-storage-${node} \ - --patch '{"spec": {"template": {"spec":{"containers":[{"name": "codex", "image":"${{ env.CODEX_IMAGE }}"}]}}}}' - done - - - name: Codex Validators - Update - run: | - for node in {1..1}; do - kubectl -n "${{ env.CODEX_NAMESPACE }}" patch statefulset codex-validator-${node} \ - --patch '{"spec": {"template": {"spec":{"containers":[{"name": "codex", "image":"${{ env.CODEX_IMAGE }}"}]}}}}' - done - - - name: Codex Storage - Status - run: | - WAIT=300 - SECONDS=0 - sleep=1 - for instance in {1..5}; do - while (( SECONDS < WAIT )); do - pod=codex-storage-${instance}-1 - phase=$(kubectl get pod "${pod}" -n "${{ env.CODEX_NAMESPACE }}" -o jsonpath='{.status.phase}') - if [[ "${phase}" == "Running" ]]; then - echo "Pod ${pod} is in the ${phase} state" - break - else - echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))" - fi - sleep "${sleep}" - done - done - - - name: Codex Validators - Status - run: | - WAIT=300 - SECONDS=0 - sleep=1 - for instance in {1..1}; do - while (( SECONDS < WAIT )); do - pod=codex-validator-${instance}-1 - phase=$(kubectl get pod "${pod}" -n "${{ env.CODEX_NAMESPACE }}" -o jsonpath='{.status.phase}') - if [[ "${phase}" == "Running" ]]; then - echo "Pod ${pod} is in the ${phase} state" - break - else - echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))" - fi - sleep "${sleep}" - done - done - - - name: Tools - Update - run: | - crawler_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app.kubernetes.io/name=crawler' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true) - discordbot_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app=discordbot' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true) - - for pod in "${crawler_pod}" "${discordbot_pod}"; do - if [[ -n "${pod}" ]]; then - kubectl delete pod -n "${{ env.TOOLS_NAMESPACE }}" "${pod}" --grace-period=10 - fi - done - - - name: Tools - Status - run: | - WAIT=300 - SECONDS=0 - sleep=1 - crawler_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app.kubernetes.io/name=crawler' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true) - discordbot_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app=discordbot' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true) - for pod in "${crawler_pod}" "${discordbot_pod}"; do - if [[ -n "${pod}" ]]; then - while (( SECONDS < WAIT )); do - phase=$(kubectl get pod "${pod}" -n "${{ env.TOOLS_NAMESPACE }}" -o 
jsonpath='{.status.phase}') - if [[ "${phase}" == "Running" ]]; then - echo "Pod ${pod} is in the ${phase} state" - break - else - echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))" - fi - sleep "${sleep}" - done - fi - done diff --git a/.github/workflows/docker-dist-tests.yml b/.github/workflows/docker-dist-tests.yml deleted file mode 100644 index f6bb163c..00000000 --- a/.github/workflows/docker-dist-tests.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Docker - Dist-Tests - - -on: - push: - branches: - - master - tags: - - 'v*.*.*' - paths-ignore: - - '**/*.md' - - '.gitignore' - - '.github/**' - - '!.github/workflows/docker-dist-tests.yml' - - '!.github/workflows/docker-reusable.yml' - - '!.github/workflows/deploy-devnet.yml' - - 'docker/**' - - '!docker/codex.Dockerfile' - - '!docker/docker-entrypoint.sh' - workflow_dispatch: - inputs: - run_release_tests: - description: Run Release tests - required: false - type: boolean - default: false - deploy_devnet: - description: Deploy Devnet - required: false - type: boolean - default: false - - -jobs: - get-contracts-hash: - runs-on: ubuntu-latest - outputs: - hash: ${{ steps.get-hash.outputs.hash }} - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - - name: Get submodule short hash - id: get-hash - run: | - hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth) - echo "hash=$hash" >> $GITHUB_OUTPUT - - build-and-push: - name: Build and Push - uses: ./.github/workflows/docker-reusable.yml - needs: get-contracts-hash - with: - nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true' - nat_ip_auto: true - tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }} - tag_suffix: dist-tests - tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }} - contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests" - run_release_tests: ${{ inputs.run_release_tests }} - secrets: inherit - - deploy-devnet: - name: Deploy Devnet - uses: ./.github/workflows/deploy-devnet.yml - needs: build-and-push - if: ${{ inputs.deploy_devnet || github.event_name == 'push' && github.ref_name == github.event.repository.default_branch }} - with: - codex_image: ${{ needs.build-and-push.outputs.codex_image }} - secrets: inherit diff --git a/.github/workflows/docker-reusable.yml b/.github/workflows/docker-reusable.yml index eb614216..b8bfeffa 100644 --- a/.github/workflows/docker-reusable.yml +++ b/.github/workflows/docker-reusable.yml @@ -70,7 +70,7 @@ on: type: string outputs: codex_image: - description: Codex Docker image tag + description: Logos Storage Docker image tag value: ${{ jobs.publish.outputs.codex_image }} @@ -87,7 +87,7 @@ env: TAG_SUFFIX: ${{ inputs.tag_suffix }} CONTRACT_IMAGE: ${{ inputs.contract_image }} # Tests - TESTS_SOURCE: codex-storage/cs-codex-dist-tests + TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests TESTS_BRANCH: master CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }} CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }} @@ -316,7 +316,7 @@ jobs: max-parallel: 1 matrix: tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }} - uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master + uses: 
logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master with: source: ${{ needs.compute-tests-inputs.outputs.source }} branch: ${{ needs.compute-tests-inputs.outputs.branch }} @@ -333,7 +333,7 @@ jobs: name: Run Release Tests needs: [compute-tests-inputs] if: ${{ inputs.run_release_tests == 'true' }} - uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master + uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master with: source: ${{ needs.compute-tests-inputs.outputs.source }} branch: ${{ needs.compute-tests-inputs.outputs.branch }} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 3a633a81..2b62e3b2 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -31,7 +31,7 @@ jobs: - name: Get submodule short hash id: get-hash run: | - hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth) + hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth) echo "hash=$hash" >> $GITHUB_OUTPUT build-and-push: name: Build and Push diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 4232ff0f..18607c70 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -52,7 +52,7 @@ jobs: node-version: 18 - name: Build OpenAPI - run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API" + run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API" - name: Build Postman Collection run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d4ba1bca..4c2cbd90 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ env: cache_nonce: 0 # Allows for easily busting actions/cache caches nim_version: pinned rust_version: 1.79.0 - codex_binary_base: codex + storage_binary_base: storage cirdl_binary_base: cirdl build_dir: build nim_flags: '' @@ -32,7 +32,6 @@ jobs: matrix: | os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2} @@ -74,18 +73,18 @@ jobs: windows*) os_name="windows" ;; esac github_ref_name="${GITHUB_REF_NAME/\//-}" - codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}" + storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}" cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}" if [[ ${os_name} == "windows" ]]; then - codex_binary="${codex_binary}.exe" + storage_binary="${storage_binary}.exe" cirdl_binary="${cirdl_binary}.exe" fi - echo 
"codex_binary=${codex_binary}" >>$GITHUB_ENV + echo "storage_binary=${storage_binary}" >>$GITHUB_ENV echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV - name: Release - Build run: | - make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}" + make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}" make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}" - name: Release - Libraries @@ -96,11 +95,11 @@ jobs: done fi - - name: Release - Upload codex build artifacts + - name: Release - Upload Logos Storage build artifacts uses: actions/upload-artifact@v4 with: - name: release-${{ env.codex_binary }} - path: ${{ env.build_dir }}/${{ env.codex_binary_base }}* + name: release-${{ env.storage_binary }} + path: ${{ env.build_dir }}/${{ env.storage_binary_base }}* retention-days: 30 - name: Release - Upload cirdl build artifacts @@ -140,7 +139,7 @@ jobs: } # Compress and prepare - for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do + for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do if [[ "${file}" == *".exe"* ]]; then # Windows - binary only @@ -189,6 +188,7 @@ jobs: aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }} echo "${branch}" > "${folder}"/latest aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }} + rm -f "${folder}"/latest # master branch elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then @@ -212,6 +212,6 @@ jobs: if: startsWith(github.ref, 'refs/tags/') with: token: ${{ secrets.DISPATCH_PAT }} - repository: codex-storage/py-codex-api-client + repository: logos-storage/logos-storage-py-api-client event-type: generate - client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}' + client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}' diff --git a/.gitmodules b/.gitmodules index 5cc2bfab..e5beeb1e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -37,22 +37,17 @@ path = vendor/nim-nitro url = https://github.com/status-im/nim-nitro.git ignore = untracked - branch = master + branch = main [submodule "vendor/questionable"] path = vendor/questionable url = https://github.com/status-im/questionable.git ignore = untracked - branch = master -[submodule "vendor/upraises"] - path = vendor/upraises - url = https://github.com/markspanbroek/upraises.git - ignore = untracked - branch = master + branch = main [submodule "vendor/asynctest"] path = vendor/asynctest url = https://github.com/status-im/asynctest.git ignore = untracked - branch = master + branch = main [submodule "vendor/nim-presto"] path = vendor/nim-presto url = https://github.com/status-im/nim-presto.git @@ -132,7 +127,7 @@ path = vendor/nim-websock url = https://github.com/status-im/nim-websock.git ignore = untracked - branch = master + branch = main [submodule "vendor/nim-contract-abi"] path = vendor/nim-contract-abi url = https://github.com/status-im/nim-contract-abi @@ -160,13 +155,13 @@ path = vendor/nim-taskpools url = https://github.com/status-im/nim-taskpools.git ignore = untracked - branch = master + branch = stable [submodule "vendor/nim-leopard"] path = vendor/nim-leopard url = https://github.com/status-im/nim-leopard.git -[submodule "vendor/nim-codex-dht"] - path = vendor/nim-codex-dht - 
url = https://github.com/codex-storage/nim-codex-dht.git +[submodule "vendor/logos-storage-nim-dht"] + path = vendor/logos-storage-nim-dht + url = https://github.com/logos-storage/logos-storage-nim-dht.git ignore = untracked branch = master [submodule "vendor/nim-datastore"] @@ -178,9 +173,11 @@ [submodule "vendor/nim-eth"] path = vendor/nim-eth url = https://github.com/status-im/nim-eth -[submodule "vendor/codex-contracts-eth"] - path = vendor/codex-contracts-eth - url = https://github.com/status-im/codex-contracts-eth +[submodule "vendor/logos-storage-contracts-eth"] + path = vendor/logos-storage-contracts-eth + url = https://github.com/logos-storage/logos-storage-contracts-eth.git + ignore = untracked + branch = master [submodule "vendor/nim-protobuf-serialization"] path = vendor/nim-protobuf-serialization url = https://github.com/status-im/nim-protobuf-serialization @@ -195,26 +192,28 @@ url = https://github.com/zevv/npeg [submodule "vendor/nim-poseidon2"] path = vendor/nim-poseidon2 - url = https://github.com/codex-storage/nim-poseidon2.git + url = https://github.com/logos-storage/nim-poseidon2.git + ignore = untracked + branch = master [submodule "vendor/constantine"] path = vendor/constantine url = https://github.com/mratsim/constantine.git [submodule "vendor/nim-circom-compat"] path = vendor/nim-circom-compat - url = https://github.com/codex-storage/nim-circom-compat.git + url = https://github.com/logos-storage/nim-circom-compat.git ignore = untracked branch = master -[submodule "vendor/codex-storage-proofs-circuits"] - path = vendor/codex-storage-proofs-circuits - url = https://github.com/codex-storage/codex-storage-proofs-circuits.git +[submodule "vendor/logos-storage-proofs-circuits"] + path = vendor/logos-storage-proofs-circuits + url = https://github.com/logos-storage/logos-storage-proofs-circuits.git ignore = untracked branch = master [submodule "vendor/nim-serde"] path = vendor/nim-serde - url = https://github.com/codex-storage/nim-serde.git + url = https://github.com/logos-storage/nim-serde.git [submodule "vendor/nim-leveldbstatic"] path = vendor/nim-leveldbstatic - url = https://github.com/codex-storage/nim-leveldb.git + url = https://github.com/logos-storage/nim-leveldb.git [submodule "vendor/nim-zippy"] path = vendor/nim-zippy url = https://github.com/status-im/nim-zippy.git @@ -225,9 +224,9 @@ path = vendor/nim-quic url = https://github.com/vacp2p/nim-quic.git ignore = untracked - branch = master + branch = main [submodule "vendor/nim-ngtcp2"] path = vendor/nim-ngtcp2 url = https://github.com/vacp2p/nim-ngtcp2.git ignore = untracked - branch = master + branch = main \ No newline at end of file diff --git a/Makefile b/Makefile index 11361fae..8b5bc371 100644 --- a/Makefile +++ b/Makefile @@ -93,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file # default target, because it's the first one that doesn't start with '.' 
-# Builds the codex binary +# Builds the Logos Storage binary all: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims + $(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims # Build tools/cirdl cirdl: | deps @@ -246,6 +246,7 @@ format: $(NPH) *.nim $(NPH) codex/ $(NPH) tests/ + $(NPH) library/ clean-nph: rm -f $(NPH) @@ -256,4 +257,32 @@ print-nph-path: clean: | clean-nph +################ +## C Bindings ## +################ +.PHONY: libstorage + +STATIC ?= 0 + +ifneq ($(strip $(STORAGE_LIB_PARAMS)),) +NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS) +endif + +libstorage: + $(MAKE) deps + rm -f build/libstorage* + +ifeq ($(STATIC), 1) + echo -e $(BUILD_MSG) "build/$@.a" && \ + $(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims +else ifeq ($(detected_OS),Windows) + echo -e $(BUILD_MSG) "build/$@.dll" && \ + $(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims +else ifeq ($(detected_OS),macOS) + echo -e $(BUILD_MSG) "build/$@.dylib" && \ + $(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims +else + echo -e $(BUILD_MSG) "build/$@.so" && \ + $(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims +endif endif # "variables.mk" was not included diff --git a/README.md b/README.md index 2a15051f..8c19c66f 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,22 @@ -# Codex Decentralized Durability Engine +# Logos Storage Decentralized Engine -> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval. +> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks. > WARNING: This project is under active development and is considered pre-alpha. 
 [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
 [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 [![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
-[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
-[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
-[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
+[![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
+[![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
+[![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
 [![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
 ![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
 
 ## Build and Run
 
-For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
+For detailed instructions on preparing to build logos-storage-nim see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
 
 To build the project, clone it and run:
 
@@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
 Run the client with:
 
 ```bash
-build/codex
+build/storage
 ```
 
 ## Configuration
 
-It is possible to configure a Codex node in several ways:
+It is possible to configure a Logos Storage node in several ways:
 1. CLI options
 2. Environment variables
 3. Configuration file
@@ -45,21 +45,71 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
 
 ## Guides
 
-To get acquainted with Codex, consider:
-* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
-* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
+To get acquainted with Logos Storage, consider:
+* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
+* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
 
 ## API
 
 The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage).
 
+## Bindings
+
+Logos Storage provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
+Currently, only a Go binding is included.
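+
+For a sense of what a wrapper looks like, the sketch below shows the general cgo wiring against the library produced by `make libstorage` (see the following subsections). The header name and the `storage_version` call are illustrative placeholders rather than the actual exported API; consult the generated header under `build/` and `examples/golang/storage.go` for the real symbols.
+
+```go
+package main
+
+/*
+#cgo CFLAGS: -I${SRCDIR}/build
+#cgo LDFLAGS: -L${SRCDIR}/build -lstorage
+#include "libstorage.h" // illustrative header name
+*/
+import "C"
+
+import "fmt"
+
+func main() {
+	// Illustrative call; the real exports are declared in the generated header.
+	fmt.Println("libstorage version:", C.GoString(C.storage_version()))
+}
+```
+
+At runtime the dynamic loader still needs to locate the shared object, which is why the `LD_LIBRARY_PATH` export below is required.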
+ +### Build the C library + +```bash +make libstorage +``` + +This produces the shared library under `build/`. + +### Run the Go example + +Build the Go example: + +```bash +go build -o storage-go examples/golang/storage.go +``` + +Export the library path: + +```bash +export LD_LIBRARY_PATH=build +``` + +Run the example: + +```bash +./storage-go +``` + +### Static vs Dynamic build + +By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime. +If you prefer a static library (`libstorage.a`), set the `STATIC` flag: + +```bash +# Build dynamic (default) +make libstorage + +# Build static +make STATIC=1 libstorage +``` + +### Limitation + +Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed. + ## Contributing and development Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. ### Linting and formatting -`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. +`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. If you are setting up fresh setup, in order to get `nph` run `make build-nph`. In order to format files run `make nph/`. If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them. diff --git a/benchmarks/README.md b/benchmarks/README.md index 0cff64e9..6f7a0a06 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -10,17 +10,17 @@ nim c -r run_benchmarks ``` By default all circuit files for each combinations of circuit args will be generated in a unique folder named like: - nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3 + logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3 Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed. You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suite your needs. See `create_circuits.nim` for their definition. -The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this. +The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this. -## Codex Ark Circom CLI +## Logos Storage Ark Circom CLI -Runs Codex's prover setup with Ark / Circom. +Runs Logos Storage's prover setup with Ark / Circom. 
Compile: ```sh diff --git a/benchmarks/create_circuits.nim b/benchmarks/create_circuits.nim index 911dcd51..72089d6e 100644 --- a/benchmarks/create_circuits.nim +++ b/benchmarks/create_circuits.nim @@ -29,10 +29,10 @@ proc findCodexProjectDir(): string = func default*(tp: typedesc[CircuitEnv]): CircuitEnv = let codexDir = findCodexProjectDir() result.nimCircuitCli = - codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" / + codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" / "proof_input" / "cli" result.circuitDirIncludes = - codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit" + codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit" result.ptauPath = codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau" result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri @@ -118,7 +118,7 @@ proc createCircuit*( ## ## All needed circuit files will be generated as needed. ## They will be located in `circBenchDir` which defaults to a folder like: - ## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3` + ## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3` ## with all the given CircuitArgs. ## let circdir = circBenchDir diff --git a/benchmarks/utils.nim b/benchmarks/utils.nim index af5cdc25..7bd0f2e0 100644 --- a/benchmarks/utils.nim +++ b/benchmarks/utils.nim @@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) = ) benchRuns[benchmarkName] = (runs.avg(), count) -template printBenchMarkSummaries*(printRegular=true, printTsv=true) = +template printBenchMarkSummaries*(printRegular = true, printTsv = true) = if printRegular: echo "" for k, v in benchRuns: echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k - + if printTsv: echo "" echo "name", "\t", "avgTimeSec", "\t", "count" for k, v in benchRuns: echo k, "\t", v.avgTimeSec, "\t", v.count - import std/math func floorLog2*(x: int): int = diff --git a/build.nims b/build.nims index 69596e01..72f44921 100644 --- a/build.nims +++ b/build.nims @@ -8,7 +8,13 @@ proc truthy(val: string): bool = const truthySwitches = @["yes", "1", "on", "true"] return val in truthySwitches -proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = +proc buildBinary( + srcName: string, + outName = os.lastPathPart(srcName), + srcDir = "./", + params = "", + lang = "c", +) = if not dirExists "build": mkDir "build" @@ -23,32 +29,56 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = let # Place build output in 'build' folder, even if name includes a longer path. 
- outName = os.lastPathPart(name) cmd = "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & - name & ".nim" + srcName & ".nim" exec(cmd) -proc test(name: string, srcDir = "tests/", params = "", lang = "c") = - buildBinary name, srcDir, params - exec "build/" & name +proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") = + if not dirExists "build": + mkDir "build" -task codex, "build codex binary": + if `type` == "dynamic": + let lib_name = ( + when defined(windows): name & ".dll" + elif defined(macosx): name & ".dylib" + else: name & ".so" + ) + exec "nim c" & " --out:build/" & lib_name & + " --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " & + "--nimMainPrefix:libstorage -d:noSignalHandler " & + "-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " & + "-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim" + else: + exec "nim c" & " --out:build/" & name & + ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " & + "--nimMainPrefix:libstorage -d:noSignalHandler " & + "-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " & + "-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim" + +proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") = + buildBinary name, outName, srcDir, params + exec "build/" & outName + +task storage, "build logos storage binary": buildBinary "codex", + outname = "storage", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE" task toolsCirdl, "build tools/cirdl binary": buildBinary "tools/cirdl/cirdl" -task testCodex, "Build & run Codex tests": - test "testCodex", params = "-d:codex_enable_proof_failures=true" +task testStorage, "Build & run Logos Storage tests": + test "testCodex", + outName = "testStorage", params = "-d:storage_enable_proof_failures=true" -task testContracts, "Build & run Codex Contract tests": +task testContracts, "Build & run Logos Storage Contract tests": test "testContracts" task testIntegration, "Run integration tests": buildBinary "codex", + outName = "storage", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:chronicles_disabled_topics=JSONRPC-HTTP-CLIENT,websock,libp2p,discv5 -d:codex_enable_proof_failures=true" var sinks = @["textlines[nocolors,file]"] @@ -63,24 +93,24 @@ task testIntegration, "Run integration tests": # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " & # "-d:chronicles_enabled_topics:integration:TRACE" -task build, "build codex binary": - codexTask() +task build, "build Logos Storage binary": + storageTask() task test, "Run tests": - testCodexTask() + testStorageTask() task testTools, "Run Tools tests": toolsCirdlTask() test "testTools" task testAll, "Run all tests (except for Taiko L2 tests)": - testCodexTask() + testStorageTask() testContractsTask() testIntegrationTask() testToolsTask() task testTaiko, "Run Taiko L2 tests": - codexTask() + storageTask() test "testTaiko" import strutils @@ -113,7 +143,7 @@ task coverage, "generates code coverage report": test "coverage", srcDir = "tests/", params = - " --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true" + " --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true" exec("rm nimcache/coverage/*.c") rmDir("coverage") mkDir("coverage") @@ -126,10 +156,32 @@ task coverage, "generates code coverage report": nimSrcs 
) echo " ======== Generating HTML coverage report ======== " - exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ") + exec( + "genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report " + ) echo " ======== Coverage report Done ======== " task showCoverage, "open coverage html": echo " ======== Opening HTML coverage report in browser... ======== " if findExe("open") != "": exec("open coverage/report/index.html") + +task libstorageDynamic, "Generate bindings": + var params = "" + when compiles(commandLineParams): + for param in commandLineParams(): + if param.len > 0 and param.startsWith("-"): + params.add " " & param + + let name = "libstorage" + buildLibrary name, "library/", params, "dynamic" + +task libstorageStatic, "Generate bindings": + var params = "" + when compiles(commandLineParams): + for param in commandLineParams(): + if param.len > 0 and param.startsWith("-"): + params.add " " & param + + let name = "libstorage" + buildLibrary name, "library/", params, "static" diff --git a/ci/linux.Jenkinsfile b/ci/linux.Jenkinsfile new file mode 100644 index 00000000..8080f7b4 --- /dev/null +++ b/ci/linux.Jenkinsfile @@ -0,0 +1,51 @@ +#!/usr/bin/env groovy +library 'status-jenkins-lib@v1.9.37' + +pipeline { + agent { + docker { + label 'linuxcontainer' + image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0' + args '--volume=/nix:/nix ' + + '--volume=/etc/nix:/etc/nix ' + } + } + + options { + timestamps() + ansiColor('xterm') + timeout(time: 20, unit: 'MINUTES') + disableConcurrentBuilds() + disableRestartFromStage() + /* manage how many builds we keep */ + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + stages { + stage('Build') { + steps { + script { + nix.flake("default") + } + } + } + + stage('Check') { + steps { + script { + sh './result/bin/storage --version' + } + } + } + } + + post { + cleanup { + cleanWs() + dir(env.WORKSPACE_TMP) { deleteDir() } + } + } +} diff --git a/Jenkinsfile b/ci/macos.Jenkinsfile similarity index 57% rename from Jenkinsfile rename to ci/macos.Jenkinsfile index c7e54c92..cbc55624 100644 --- a/Jenkinsfile +++ b/ci/macos.Jenkinsfile @@ -1,11 +1,15 @@ #!/usr/bin/env groovy -library 'status-jenkins-lib@v1.9.13' +library 'status-jenkins-lib@v1.9.37' pipeline { - agent { label 'linux && x86_64 && nix-2.24' } + agent { label 'macos && aarch64 && nix' } options { + timestamps() + ansiColor('xterm') + timeout(time: 20, unit: 'MINUTES') disableConcurrentBuilds() + disableRestartFromStage() /* manage how many builds we keep */ buildDiscarder(logRotator( numToKeepStr: '20', @@ -25,13 +29,16 @@ pipeline { stage('Check') { steps { script { - sh './result/bin/codex --version' + sh './result/bin/storage --version' } } } } post { - cleanup { cleanWs() } + cleanup { + cleanWs() + dir(env.WORKSPACE_TMP) { deleteDir() } + } } } diff --git a/codex.nim b/codex.nim index 7749bdee..24d27959 100644 --- a/codex.nim +++ b/codex.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -45,7 +45,7 @@ when isMainModule: let config = CodexConf.load( version = codexFullVersion, - envVarsPrefix = "codex", + envVarsPrefix = "storage", secondarySources = proc( config: CodexConf, sources: auto ) {.gcsafe, raises: [ConfigurationError].} = @@ -54,6 +54,16 @@ when isMainModule: , ) config.setupLogging() + + try: + 
updateLogLevel(config.logLevel) + except ValueError as err: + try: + stderr.write "Invalid value for --log-level. " & err.msg & "\n" + except IOError: + echo "Invalid value for --log-level. " & err.msg + quit QuitFailure + config.setupMetrics() if not (checkAndCreateDataDir((config.dataDir).string)): @@ -89,15 +99,15 @@ when isMainModule: try: CodexServer.new(config, privateKey) except Exception as exc: - error "Failed to start Codex", msg = exc.msg + error "Failed to start Logos Storage", msg = exc.msg quit QuitFailure ## Ctrl+C handling proc doShutdown() = - shutdown = server.stop() + shutdown = server.shutdown() state = CodexStatus.Stopping - notice "Stopping Codex" + notice "Stopping Logos Storage" proc controlCHandler() {.noconv.} = when defined(windows): @@ -128,7 +138,7 @@ when isMainModule: try: waitFor server.start() except CatchableError as error: - error "Codex failed to start", error = error.msg + error "Logos Storage failed to start", error = error.msg # XXX ideally we'd like to issue a stop instead of quitting cold turkey, # but this would mean we'd have to fix the implementation of all # services so they won't crash if we attempt to stop them before they @@ -149,7 +159,7 @@ when isMainModule: # be assigned before state switches to Stopping waitFor shutdown except CatchableError as error: - error "Codex didn't shutdown correctly", error = error.msg + error "Logos Storage didn't shutdown correctly", error = error.msg quit QuitFailure - notice "Exited codex" + notice "Exited Storage" diff --git a/codex.nimble b/codex.nimble index f3033861..43c39219 100644 --- a/codex.nimble +++ b/codex.nimble @@ -1,5 +1,5 @@ version = "0.1.0" -author = "Codex Team" +author = "Logos Storage Team" description = "p2p data durability engine" license = "MIT" binDir = "build" diff --git a/codex/blockexchange/engine/advertiser.nim b/codex/blockexchange/engine/advertiser.nim index 9e68ebbb..c038db38 100644 --- a/codex/blockexchange/engine/advertiser.nim +++ b/codex/blockexchange/engine/advertiser.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/blockexchange/engine/discovery.nim b/codex/blockexchange/engine/discovery.nim index b32b8555..6a1b808d 100644 --- a/codex/blockexchange/engine/discovery.nim +++ b/codex/blockexchange/engine/discovery.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -8,6 +8,7 @@ ## those terms. 
import std/sequtils +import std/algorithm import pkg/chronos import pkg/libp2p/cid @@ -38,6 +39,7 @@ const DefaultConcurrentDiscRequests = 10 DefaultDiscoveryTimeout = 1.minutes DefaultMinPeersPerBlock = 3 + DefaultMaxPeersPerBlock = 8 DefaultDiscoveryLoopSleep = 3.seconds type DiscoveryEngine* = ref object of RootObj @@ -51,11 +53,32 @@ type DiscoveryEngine* = ref object of RootObj discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle discoveryQueue*: AsyncQueue[Cid] # Discovery queue trackedFutures*: TrackedFutures # Tracked Discovery tasks futures - minPeersPerBlock*: int # Max number of peers with block + minPeersPerBlock*: int # Min number of peers with block + maxPeersPerBlock*: int # Max number of peers with block discoveryLoopSleep: Duration # Discovery loop sleep inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests +proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} = + var haves = b.peers.peersHave(cid) + let count = haves.len - b.maxPeersPerBlock + if count <= 0: + return + + haves.sort( + proc(a, b: BlockExcPeerCtx): int = + cmp(a.lastExchange, b.lastExchange) + ) + + let toRemove = haves[0 ..< count] + for peer in toRemove: + try: + peer.cleanPresence(BlockAddress.init(cid)) + trace "Removed block presence from peer", cid, peer = peer.id + except CatchableError as exc: + error "Failed to clean presence for peer", + cid, peer = peer.id, error = exc.msg, name = exc.name + proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} = try: while b.discEngineRunning: @@ -78,8 +101,16 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} = trace "Discovery request already in progress", cid continue + trace "Running discovery task for cid", cid + let haves = b.peers.peersHave(cid) + if haves.len > b.maxPeersPerBlock: + trace "Cleaning up excess peers", + cid, peers = haves.len, max = b.maxPeersPerBlock + b.cleanupExcessPeers(cid) + continue + if haves.len < b.minPeersPerBlock: let request = b.discovery.find(cid) b.inFlightDiscReqs[cid] = request @@ -156,6 +187,7 @@ proc new*( concurrentDiscReqs = DefaultConcurrentDiscRequests, discoveryLoopSleep = DefaultDiscoveryLoopSleep, minPeersPerBlock = DefaultMinPeersPerBlock, + maxPeersPerBlock = DefaultMaxPeersPerBlock, ): DiscoveryEngine = ## Create a discovery engine instance for advertising services ## @@ -171,4 +203,5 @@ proc new*( inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](), discoveryLoopSleep: discoveryLoopSleep, minPeersPerBlock: minPeersPerBlock, + maxPeersPerBlock: maxPeersPerBlock, ) diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index 0d04fd7f..f9245885 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -12,12 +12,14 @@ import std/sets import std/options import std/algorithm import std/sugar +import std/random import pkg/chronos import pkg/libp2p/[cid, switch, multihash, multicodec] import pkg/metrics import pkg/stint import pkg/questionable +import pkg/stew/shims/sets import ../../rng import ../../stores/blockstore @@ -63,30 +65,59 @@ declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sen declareCounter( codex_block_exchange_blocks_received, "codex blockexchange blocks received" ) 
+declareCounter(
+  codex_block_exchange_spurious_blocks_received,
+  "codex blockexchange unrequested/duplicate blocks received",
+)
+declareCounter(
+  codex_block_exchange_discovery_requests_total,
+  "Total number of peer discovery requests sent",
+)
+declareCounter(
+  codex_block_exchange_peer_timeouts_total, "Total number of peer activity timeouts"
+)
+declareCounter(
+  codex_block_exchange_requests_failed_total,
+  "Total number of block requests that failed after exhausting retries",
+)
 
 const
-  DefaultMaxPeersPerRequest* = 10
+  # The default max message length of nim-libp2p is 100 megabytes, meaning we can
+  # in principle fit up to 1600 64k blocks per message, so 20 is well under
+  # that number.
+  DefaultMaxBlocksPerMessage = 20
   DefaultTaskQueueSize = 100
   DefaultConcurrentTasks = 10
+  # Don't do more than one discovery request per `DiscoveryRateLimit` seconds.
+  DiscoveryRateLimit = 3.seconds
+  DefaultPeerActivityTimeout = 1.minutes
+  # Match MaxWantListBatchSize to efficiently respond to incoming WantLists
+  PresenceBatchSize = MaxWantListBatchSize
+  CleanupBatchSize = 2048
 
 type
   TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
   TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
+  PeerSelector* =
+    proc(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx {.gcsafe, raises: [].}
 
   BlockExcEngine* = ref object of RootObj
     localStore*: BlockStore # Local block store for this instance
-    network*: BlockExcNetwork # Petwork interface
+    network*: BlockExcNetwork # Network interface
     peers*: PeerCtxStore # Peers we're currently actively exchanging with
     taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
       # Peers we're currently processing tasks for
+    selectPeer*: PeerSelector # Peer selection strategy for block requests
     concurrentTasks: int # Number of concurrent peers we're serving at any given time
     trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
     blockexcRunning: bool # Indicates if the blockexc task is running
+    maxBlocksPerMessage: int
+      # Maximum number of blocks we can squeeze in a single message
    pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
     wallet*: WalletRef # Nitro wallet for micropayments
     pricing*: ?Pricing # Optional bandwidth pricing
     discovery*: DiscoveryEngine
     advertiser*: Advertiser
+    lastDiscRequest: Moment # time of last discovery request
 
   Pricing* = object
     address*: EthAddress
@@ -104,7 +135,6 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
 proc start*(self: BlockExcEngine) {.async: (raises: []).} =
   ## Start the blockexc task
   ##
-
   await self.discovery.start()
   await self.advertiser.start()
 
@@ -154,8 +184,145 @@
   ) # we want this remote to send us a block
   codex_block_exchange_want_block_lists_sent.inc()
 
-proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
-  Rng.instance.sample(peers)
+proc sendBatchedWantList(
+    self: BlockExcEngine,
+    peer: BlockExcPeerCtx,
+    addresses: seq[BlockAddress],
+    full: bool,
+) {.async: (raises: [CancelledError]).} =
+  var offset = 0
+  while offset < addresses.len:
+    let batchEnd = min(offset + MaxWantListBatchSize, addresses.len)
+    let batch = addresses[offset ..< batchEnd]
+
+    trace "Sending want list batch",
+      peer = peer.id,
+      batchSize = batch.len,
+      offset = offset,
+      total = addresses.len,
+      full = full
+
+    await self.network.request.sendWantList(
+      peer.id, batch, full = (full and offset == 0)
+    )
+    for address in batch:
+      peer.lastSentWants.incl(address)
+
+    offset = batchEnd
+
+proc refreshBlockKnowledge(
+    self: BlockExcEngine, peer: BlockExcPeerCtx, skipDelta = false, resetBackoff = false
+) {.async: (raises: [CancelledError]).} =
+  if peer.lastSentWants.len > 0:
+    var toRemove: seq[BlockAddress]
+
+    for address in peer.lastSentWants:
+      if address notin self.pendingBlocks:
+        toRemove.add(address)
+
+      if toRemove.len >= CleanupBatchSize:
+        await idleAsync()
+        break
+
+    for address in toRemove:
+      peer.lastSentWants.excl(address)
+
+  if self.pendingBlocks.wantListLen == 0:
+    if peer.lastSentWants.len > 0:
+      trace "Clearing want list tracking, no pending blocks", peer = peer.id
+      peer.lastSentWants.clear()
+    return
+
+  # We only ask for blocks that the peer hasn't already told us it has.
+  let
+    peerHave = peer.peerHave
+    toAsk = toHashSet(self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave))
+
+  if toAsk.len == 0:
+    if peer.lastSentWants.len > 0:
+      trace "Clearing want list tracking, peer has all blocks", peer = peer.id
+      peer.lastSentWants.clear()
+    return
+
+  let newWants = toAsk - peer.lastSentWants
+
+  if peer.lastSentWants.len > 0 and not skipDelta:
+    if newWants.len > 0:
+      trace "Sending delta want list update",
+        peer = peer.id, newWants = newWants.len, totalWants = toAsk.len
+
+      await self.sendBatchedWantList(peer, newWants.toSeq, full = false)
+
+      if resetBackoff:
+        peer.wantsUpdated
+    else:
+      trace "No changes in want list, skipping send", peer = peer.id
+    peer.lastSentWants = toAsk
+  else:
+    trace "Sending full want list", peer = peer.id, length = toAsk.len
+
+    await self.sendBatchedWantList(peer, toAsk.toSeq, full = true)
+
+    if resetBackoff:
+      peer.wantsUpdated
+
+proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledError]).} =
+  let runtimeQuota = 10.milliseconds
+  var lastIdle = Moment.now()
+
+  for peer in self.peers.peers.values.toSeq:
+    # We refresh block knowledge if:
+    # 1. the peer hasn't been refreshed in a while;
+    # 2. the list of blocks we care about has changed.
+    #
+    # Note that because of (2), it is important that we update our
+    # want list in the coarsest way possible instead of over many
+    # small updates.
+    #
+
+    # In dynamic swarms, staleness will dominate latency.
+    let
+      hasNewBlocks = peer.lastRefresh < self.pendingBlocks.lastInclusion
+      isKnowledgeStale = peer.isKnowledgeStale
+
+    if isKnowledgeStale or hasNewBlocks:
+      if not peer.refreshInProgress:
+        peer.refreshRequested()
+        await self.refreshBlockKnowledge(
+          peer, skipDelta = isKnowledgeStale, resetBackoff = hasNewBlocks
+        )
+    else:
+      trace "Not refreshing: peer is up to date", peer = peer.id
+
+    if (Moment.now() - lastIdle) >= runtimeQuota:
+      try:
+        await idleAsync()
+      except CancelledError:
+        discard
+      lastIdle = Moment.now()
+
+proc searchForNewPeers(self: BlockExcEngine, cid: Cid) =
+  if self.lastDiscRequest + DiscoveryRateLimit < Moment.now():
+    trace "Searching for new peers for", cid = cid
+    codex_block_exchange_discovery_requests_total.inc()
+    self.lastDiscRequest = Moment.now() # always refresh before calling await!
+ self.discovery.queueFindBlocksReq(@[cid]) + else: + trace "Not searching for new peers, rate limit not expired", cid = cid + +proc evictPeer(self: BlockExcEngine, peer: PeerId) = + ## Cleanup disconnected peer + ## + + trace "Evicting disconnected/departed peer", peer + + let peerCtx = self.peers.get(peer) + if not peerCtx.isNil: + for address in peerCtx.blocksRequested: + self.pendingBlocks.clearRequest(address, peer.some) + + # drop the peer from the peers table + self.peers.remove(peer) proc downloadInternal( self: BlockExcEngine, address: BlockAddress @@ -173,41 +340,147 @@ proc downloadInternal( if self.pendingBlocks.retriesExhausted(address): trace "Error retries exhausted" + codex_block_exchange_requests_failed_total.inc() handle.fail(newException(RetriesExhaustedError, "Error retries exhausted")) break - trace "Running retry handle" let peers = self.peers.getPeersForBlock(address) logScope: peersWith = peers.with.len peersWithout = peers.without.len - trace "Peers for block" - if peers.with.len > 0: - self.pendingBlocks.setInFlight(address, true) - await self.sendWantBlock(@[address], peers.with.randomPeer) - else: - self.pendingBlocks.setInFlight(address, false) + if peers.with.len == 0: + # We know of no peers that have the block. if peers.without.len > 0: - await self.sendWantHave(@[address], peers.without) - self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) + # If we have peers connected but none of them have the block, this + # could be because our knowledge about what they have has run stale. + # Tries to refresh it. + await self.refreshBlockKnowledge() + # Also tries to look for new peers for good measure. + # TODO: in the future, peer search and knowledge maintenance should + # be completely decoupled from one another. It is very hard to + # control what happens and how many neighbors we get like this. + self.searchForNewPeers(address.cidOrTreeCid) - await (handle or sleepAsync(self.pendingBlocks.retryInterval)) + let nextDiscovery = + if self.lastDiscRequest + DiscoveryRateLimit > Moment.now(): + (self.lastDiscRequest + DiscoveryRateLimit - Moment.now()) + else: + 0.milliseconds + + let retryDelay = + max(secs(rand(self.pendingBlocks.retryInterval.secs)), nextDiscovery) + + # We now wait for a bit and then retry. If the handle gets completed in the + # meantime (cause the presence handler might have requested the block and + # received it in the meantime), we are done. Retry delays are randomized + # so we don't get all block loops spinning at the same time. + await handle or sleepAsync(retryDelay) + if handle.finished: + break + + # Without decrementing the retries count, this would infinitely loop + # trying to find peers. + self.pendingBlocks.decRetries(address) + + # If we still don't have the block, we'll go for another cycle. + trace "No peers for block, will retry shortly" + continue + + # Once again, it might happen that the block was requested to a peer + # in the meantime. If so, we don't need to do anything. Otherwise, + # we'll be the ones placing the request. 
+ let scheduledPeer = + if not self.pendingBlocks.isRequested(address): + let peer = self.selectPeer(peers.with) + discard self.pendingBlocks.markRequested(address, peer.id) + peer.blockRequestScheduled(address) + trace "Request block from block retry loop" + await self.sendWantBlock(@[address], peer) + peer + else: + let peerId = self.pendingBlocks.getRequestPeer(address).get() + self.peers.get(peerId) + + if scheduledPeer.isNil: + trace "Scheduled peer no longer available, clearing stale request", address + self.pendingBlocks.clearRequest(address) + continue + + # Parks until either the block is received, or the peer times out. + let activityTimer = scheduledPeer.activityTimer() + await handle or activityTimer # TODO: or peerDropped + activityTimer.cancel() + + # XXX: we should probably not have this. Blocks should be retried + # to infinity unless cancelled by the client. self.pendingBlocks.decRetries(address) if handle.finished: trace "Handle for block finished", failed = handle.failed break + else: + # If the peer timed out, retries immediately. + trace "Peer timed out during block request", peer = scheduledPeer.id + codex_block_exchange_peer_timeouts_total.inc() + await self.network.dropPeer(scheduledPeer.id) + # Evicts peer immediately or we may end up picking it again in the + # next retry. + self.evictPeer(scheduledPeer.id) except CancelledError as exc: trace "Block download cancelled" if not handle.finished: await handle.cancelAndWait() except RetriesExhaustedError as exc: warn "Retries exhausted for block", address, exc = exc.msg + codex_block_exchange_requests_failed_total.inc() if not handle.finished: handle.fail(exc) finally: - self.pendingBlocks.setInFlight(address, false) + self.pendingBlocks.clearRequest(address) + +proc requestBlocks*( + self: BlockExcEngine, addresses: seq[BlockAddress] +): SafeAsyncIter[Block] = + var handles: seq[BlockHandle] + + # Adds all blocks to pendingBlocks before calling the first downloadInternal. This will + # ensure that we don't send incomplete want lists. + for address in addresses: + if address notin self.pendingBlocks: + handles.add(self.pendingBlocks.getWantHandle(address)) + + for address in addresses: + self.trackedFutures.track(self.downloadInternal(address)) + + let totalHandles = handles.len + var completed = 0 + + proc isFinished(): bool = + completed == totalHandles + + proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} = + # Be it success or failure, we're completing this future. + let value = + try: + # FIXME: this is super expensive. We're doing several linear scans, + # not to mention all the copying and callback fumbling in `one`. 
+ let + handle = await one(handles) + i = handles.find(handle) + handles.del(i) + success await handle + except CancelledError as err: + warn "Block request cancelled", addresses, err = err.msg + raise err + except CatchableError as err: + error "Error getting blocks from exchange engine", addresses, err = err.msg + failure err + + inc(completed) + return value + + return SafeAsyncIter[Block].new(genNext, isFinished) proc requestBlock*( self: BlockExcEngine, address: BlockAddress @@ -239,60 +512,64 @@ proc completeBlock*(self: BlockExcEngine, address: BlockAddress, blk: Block) = proc blockPresenceHandler*( self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] ) {.async: (raises: []).} = - trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) + trace "Received block presence from peer", peer, len = blocks.len let peerCtx = self.peers.get(peer) - ourWantList = toSeq(self.pendingBlocks.wantList) + ourWantList = toHashSet(self.pendingBlocks.wantList.toSeq) if peerCtx.isNil: return + peerCtx.refreshReplied() + for blk in blocks: if presence =? Presence.init(blk): peerCtx.setPresence(presence) let peerHave = peerCtx.peerHave - dontWantCids = peerHave.filterIt(it notin ourWantList) + dontWantCids = peerHave - ourWantList if dontWantCids.len > 0: - peerCtx.cleanPresence(dontWantCids) + peerCtx.cleanPresence(dontWantCids.toSeq) let ourWantCids = ourWantList.filterIt( it in peerHave and not self.pendingBlocks.retriesExhausted(it) and - not self.pendingBlocks.isInFlight(it) - ) + self.pendingBlocks.markRequested(it, peer) + ).toSeq for address in ourWantCids: - self.pendingBlocks.setInFlight(address, true) self.pendingBlocks.decRetries(address) + peerCtx.blockRequestScheduled(address) if ourWantCids.len > 0: trace "Peer has blocks in our wantList", peer, wants = ourWantCids + # FIXME: this will result in duplicate requests for blocks if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption: warn "Failed to send wantBlock to peer", peer, err = err.msg + for address in ourWantCids: + self.pendingBlocks.clearRequest(address, peer.some) proc scheduleTasks( self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] ) {.async: (raises: [CancelledError]).} = - let cids = blocksDelivery.mapIt(it.blk.cid) - # schedule any new peers to provide blocks to for p in self.peers: - for c in cids: # for each cid + for blockDelivery in blocksDelivery: # for each cid # schedule a peer if it wants at least one cid # and we have it in our local store - if c in p.peerWantsCids: + if blockDelivery.address in p.wantedBlocks: + let cid = blockDelivery.blk.cid try: - if await (c in self.localStore): + if await (cid in self.localStore): # TODO: the try/except should go away once blockstore tracks exceptions self.scheduleTask(p) break except CancelledError as exc: - warn "Checking local store canceled", cid = c, err = exc.msg + warn "Checking local store canceled", cid = cid, err = exc.msg return except CatchableError as exc: - error "Error checking local store for cid", cid = c, err = exc.msg + error "Error checking local store for cid", cid = cid, err = exc.msg raiseAssert "Unexpected error checking local store for cid" proc cancelBlocks( @@ -301,28 +578,45 @@ proc cancelBlocks( ## Tells neighboring peers that we're no longer interested in a block. 
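+ ## Cancellations are only dispatched to peers that still have an outstanding
+ ## request for one of the delivered blocks.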
## + let blocksDelivered = toHashSet(addrs) + var scheduledCancellations: Table[PeerId, HashSet[BlockAddress]] + if self.peers.len == 0: return - trace "Sending block request cancellations to peers", - addrs, peers = self.peers.peerIds - - proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = + proc dispatchCancellations( + entry: tuple[peerId: PeerId, addresses: HashSet[BlockAddress]] + ): Future[PeerId] {.async: (raises: [CancelledError]).} = + trace "Sending block request cancellations to peer", + peer = entry.peerId, addresses = entry.addresses.len await self.network.request.sendWantCancellations( - peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx) + peer = entry.peerId, addresses = entry.addresses.toSeq ) - return peerCtx + return entry.peerId try: - let (succeededFuts, failedFuts) = await allFinishedFailed[BlockExcPeerCtx]( - toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map( - processPeer - ) + for peerCtx in self.peers.peers.values: + # Do we have pending requests, towards this peer, for any of the blocks + # that were just delivered? + let intersection = peerCtx.blocksRequested.intersection(blocksDelivered) + if intersection.len > 0: + # If so, schedules a cancellation. + scheduledCancellations[peerCtx.id] = intersection + + if scheduledCancellations.len == 0: + return + + let (succeededFuts, failedFuts) = await allFinishedFailed[PeerId]( + toSeq(scheduledCancellations.pairs).map(dispatchCancellations) ) - (await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx): - peerCtx.cleanPresence(addrs) + (await allFinished(succeededFuts)).mapIt(it.read).apply do(peerId: PeerId): + let ctx = self.peers.get(peerId) + if not ctx.isNil: + ctx.cleanPresence(addrs) + for address in scheduledCancellations[peerId]: + ctx.blockRequestCancelled(address) if failedFuts.len > 0: warn "Failed to send block request cancellations to peers", peers = failedFuts.len @@ -392,17 +686,31 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void = return success() proc blocksDeliveryHandler*( - self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] + self: BlockExcEngine, + peer: PeerId, + blocksDelivery: seq[BlockDelivery], + allowSpurious: bool = false, ) {.async: (raises: []).} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) var validatedBlocksDelivery: seq[BlockDelivery] + let peerCtx = self.peers.get(peer) + + let runtimeQuota = 10.milliseconds + var lastIdle = Moment.now() + for bd in blocksDelivery: logScope: peer = peer address = bd.address try: + # Unknown peers and unrequested blocks are dropped with a warning. + if not allowSpurious and (peerCtx == nil or not peerCtx.blockReceived(bd.address)): + warn "Dropping unrequested or duplicate block received from peer" + codex_block_exchange_spurious_blocks_received.inc() + continue + if err =? 
self.validateBlockDelivery(bd).errorOption: warn "Block validation failed", msg = err.msg continue @@ -422,15 +730,25 @@ proc blocksDeliveryHandler*( ).errorOption: warn "Unable to store proof and cid for a block" continue + except CancelledError: + trace "Block delivery handling cancelled" except CatchableError as exc: warn "Error handling block delivery", error = exc.msg continue validatedBlocksDelivery.add(bd) + if (Moment.now() - lastIdle) >= runtimeQuota: + try: + await idleAsync() + except CancelledError: + discard + except CatchableError: + discard + lastIdle = Moment.now() + codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) - let peerCtx = self.peers.get(peer) if peerCtx != nil: if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption: warn "Error paying for blocks", err = err.msg @@ -454,16 +772,17 @@ proc wantListHandler*( presence: seq[BlockPresence] schedulePeer = false + let runtimeQuota = 10.milliseconds + var lastIdle = Moment.now() + try: for e in wantList.entries: - let idx = peerCtx.peerWants.findIt(it.address == e.address) - logScope: peer = peerCtx.id address = e.address wantType = $e.wantType - if idx < 0: # Adding new entry to peer wants + if e.address notin peerCtx.wantedBlocks: # Adding new entry to peer wants let have = try: @@ -474,6 +793,8 @@ proc wantListHandler*( price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) if e.cancel: + # This is sort of expected if we sent the block to the peer, as we have removed + # it from the peer's wantlist ourselves. trace "Received cancelation for untracked block, skipping", address = e.address continue @@ -482,12 +803,14 @@ proc wantListHandler*( case e.wantType of WantType.WantHave: if have: + trace "We HAVE the block", address = e.address presence.add( BlockPresence( address: e.address, `type`: BlockPresenceType.Have, price: price ) ) else: + trace "We DON'T HAVE the block", address = e.address if e.sendDontHave: presence.add( BlockPresence( @@ -497,28 +820,35 @@ proc wantListHandler*( codex_block_exchange_want_have_lists_received.inc() of WantType.WantBlock: - peerCtx.peerWants.add(e) + peerCtx.wantedBlocks.incl(e.address) schedulePeer = true codex_block_exchange_want_block_lists_received.inc() else: # Updating existing entry in peer wants # peer doesn't want this block anymore if e.cancel: trace "Canceling want for block", address = e.address - peerCtx.peerWants.del(idx) + peerCtx.wantedBlocks.excl(e.address) trace "Canceled block request", - address = e.address, len = peerCtx.peerWants.len + address = e.address, len = peerCtx.wantedBlocks.len else: + trace "Peer has requested a block more than once", address = e.address if e.wantType == WantType.WantBlock: schedulePeer = true - # peer might want to ask for the same cid with - # different want params - trace "Updating want for block", address = e.address - peerCtx.peerWants[idx] = e # update entry - trace "Updated block request", - address = e.address, len = peerCtx.peerWants.len + if presence.len >= PresenceBatchSize or (Moment.now() - lastIdle) >= runtimeQuota: + if presence.len > 0: + trace "Sending presence batch to remote", items = presence.len + await self.network.request.sendPresence(peer, presence) + presence = @[] + try: + await idleAsync() + except CancelledError: + discard + lastIdle = Moment.now() + + # Send any remaining presence messages if presence.len > 0: - trace "Sending presence to remote", items = presence.mapIt($it).join(",") + trace "Sending final presence to remote", items = presence.len 
await self.network.request.sendPresence(peer, presence) if schedulePeer: @@ -550,7 +880,7 @@ proc paymentHandler*( else: context.paymentChannel = self.wallet.acceptChannel(payment).option -proc setupPeer*( +proc peerAddedHandler*( self: BlockExcEngine, peer: PeerId ) {.async: (raises: [CancelledError]).} = ## Perform initial setup, such as want @@ -560,88 +890,85 @@ proc setupPeer*( trace "Setting up peer", peer if peer notin self.peers: + let peerCtx = BlockExcPeerCtx(id: peer, activityTimeout: DefaultPeerActivityTimeout) trace "Setting up new peer", peer - self.peers.add(BlockExcPeerCtx(id: peer)) + self.peers.add(peerCtx) trace "Added peer", peers = self.peers.len - - # broadcast our want list, the other peer will do the same - if self.pendingBlocks.wantListLen > 0: - trace "Sending our want list to a peer", peer - let cids = toSeq(self.pendingBlocks.wantList) - await self.network.request.sendWantList(peer, cids, full = true) + await self.refreshBlockKnowledge(peerCtx) if address =? self.pricing .? address: trace "Sending account to peer", peer await self.network.request.sendAccount(peer, Account(address: address)) -proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} = - ## Cleanup disconnected peer - ## +proc localLookup( + self: BlockExcEngine, address: BlockAddress +): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} = + if address.leaf: + (await self.localStore.getBlockAndProof(address.treeCid, address.index)).map( + (blkAndProof: (Block, CodexProof)) => + BlockDelivery(address: address, blk: blkAndProof[0], proof: blkAndProof[1].some) + ) + else: + (await self.localStore.getBlock(address)).map( + (blk: Block) => BlockDelivery(address: address, blk: blk, proof: CodexProof.none) + ) - trace "Dropping peer", peer +iterator splitBatches[T](sequence: seq[T], batchSize: int): seq[T] = + var batch: seq[T] + for element in sequence: + if batch.len == batchSize: + yield batch + batch = @[] + batch.add(element) - # drop the peer from the peers table - self.peers.remove(peer) + if batch.len > 0: + yield batch proc taskHandler*( - self: BlockExcEngine, task: BlockExcPeerCtx -) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} = + self: BlockExcEngine, peerCtx: BlockExcPeerCtx +) {.async: (raises: [CancelledError, RetriesExhaustedError]).} = # Send to the peer blocks he wants to get, # if they present in our local store - # TODO: There should be all sorts of accounting of - # bytes sent/received here + # Blocks that have been sent have already been picked up by other tasks and + # should not be re-sent. + var + wantedBlocks = peerCtx.wantedBlocks.filterIt(not peerCtx.isBlockSent(it)) + sent: HashSet[BlockAddress] - var wantsBlocks = - task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight) + trace "Running task for peer", peer = peerCtx.id - proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) = - for peerWant in task.peerWants.mitems: - if peerWant.address in addresses: - peerWant.inFlight = inFlight + for wantedBlock in wantedBlocks: + peerCtx.markBlockAsSent(wantedBlock) - if wantsBlocks.len > 0: - # Mark wants as in-flight. - let wantAddresses = wantsBlocks.mapIt(it.address) - updateInFlight(wantAddresses, true) - wantsBlocks.sort(SortOrder.Descending) + try: + for batch in wantedBlocks.toSeq.splitBatches(self.maxBlocksPerMessage): + var blockDeliveries: seq[BlockDelivery] + for wantedBlock in batch: + # I/O is blocking so looking up blocks sequentially is fine. + without blockDelivery =? 
await self.localLookup(wantedBlock), err:
+ error "Error getting block from local store",
+ err = err.msg, address = wantedBlock
+ peerCtx.markBlockAsNotSent(wantedBlock)
+ continue
+ blockDeliveries.add(blockDelivery)
+ sent.incl(wantedBlock)

- proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
- if e.address.leaf:
- (await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
- (blkAndProof: (Block, CodexProof)) =>
- BlockDelivery(
- address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
- )
- )
- else:
- (await self.localStore.getBlock(e.address)).map(
- (blk: Block) =>
- BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
- )
+ if blockDeliveries.len == 0:
+ continue

- let
- blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
- blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
- if bd =? it.value:
- bd
- else:
- raiseAssert "Unexpected error in local lookup"
-
- # All the wants that failed local lookup must be set to not-in-flight again.
- let
- successAddresses = blocksDelivery.mapIt(it.address)
- failedAddresses = wantAddresses.filterIt(it notin successAddresses)
- updateInFlight(failedAddresses, false)
-
- if blocksDelivery.len > 0:
- trace "Sending blocks to peer",
- peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
- await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
-
- codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
-
- task.peerWants.keepItIf(it.address notin successAddresses)
+ await self.network.request.sendBlocksDelivery(peerCtx.id, blockDeliveries)
+ codex_block_exchange_blocks_sent.inc(blockDeliveries.len.int64)
+ # Drops the batch from the peer's set of wanted blocks; i.e., it assumes that
+ # once we have sent the blocks, the peer no longer wants them, so we don't
+ # need to re-send them. Note that the send might still fail down the line and
+ # we will have removed them anyway. At that point, we rely on the requester
+ # performing a retry for the request to succeed.
+ peerCtx.wantedBlocks.keepItIf(it notin sent)
+ finally:
+ # Better safe than sorry: if an exception does happen, we don't want to keep
+ # those blocks marked as sent, as that would effectively prevent them from
+ # ever being sent again.
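+ # Blocks from batches that were dispatched successfully have already been
+ # removed from `wantedBlocks` above, so clearing their sent-marks here does
+ # not cause them to be re-sent.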
+ peerCtx.blocksSent.keepItIf(it notin wantedBlocks) proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = ## process tasks @@ -652,11 +979,47 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = while self.blockexcRunning: let peerCtx = await self.taskQueue.pop() await self.taskHandler(peerCtx) + except CancelledError: + trace "block exchange task runner cancelled" except CatchableError as exc: error "error running block exchange task", error = exc.msg info "Exiting blockexc task runner" +proc selectRandom*( + peers: seq[BlockExcPeerCtx] +): BlockExcPeerCtx {.gcsafe, raises: [].} = + if peers.len == 1: + return peers[0] + + proc evalPeerScore(peer: BlockExcPeerCtx): float = + let + loadPenalty = peer.blocksRequested.len.float * 2.0 + successRate = + if peer.exchanged > 0: + peer.exchanged.float / (peer.exchanged + peer.blocksRequested.len).float + else: + 0.5 + failurePenalty = (1.0 - successRate) * 5.0 + return loadPenalty + failurePenalty + + let + scores = peers.mapIt(evalPeerScore(it)) + maxScore = scores.max() + 1.0 + weights = scores.mapIt(maxScore - it) + + var totalWeight = 0.0 + for w in weights: + totalWeight += w + + var r = rand(totalWeight) + for i, weight in weights: + r -= weight + if r <= 0.0: + return peers[i] + + return peers[^1] + proc new*( T: type BlockExcEngine, localStore: BlockStore, @@ -666,7 +1029,9 @@ proc new*( advertiser: Advertiser, peerStore: PeerCtxStore, pendingBlocks: PendingBlocksManager, + maxBlocksPerMessage = DefaultMaxBlocksPerMessage, concurrentTasks = DefaultConcurrentTasks, + selectPeer: PeerSelector = selectRandom, ): BlockExcEngine = ## Create new block exchange engine instance ## @@ -679,23 +1044,13 @@ proc new*( wallet: wallet, concurrentTasks: concurrentTasks, trackedFutures: TrackedFutures(), + maxBlocksPerMessage: maxBlocksPerMessage, taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), discovery: discovery, advertiser: advertiser, + selectPeer: selectPeer, ) - proc peerEventHandler( - peerId: PeerId, event: PeerEvent - ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = - if event.kind == PeerEventKind.Joined: - await self.setupPeer(peerId) - else: - self.dropPeer(peerId) - - if not isNil(network.switch): - network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) - network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc blockWantListHandler( peer: PeerId, wantList: WantList ): Future[void] {.async: (raises: []).} = @@ -721,12 +1076,24 @@ proc new*( ): Future[void] {.async: (raises: []).} = self.paymentHandler(peer, payment) + proc peerAddedHandler( + peer: PeerId + ): Future[void] {.async: (raises: [CancelledError]).} = + await self.peerAddedHandler(peer) + + proc peerDepartedHandler( + peer: PeerId + ): Future[void] {.async: (raises: [CancelledError]).} = + self.evictPeer(peer) + network.handlers = BlockExcHandlers( onWantList: blockWantListHandler, onBlocksDelivery: blocksDeliveryHandler, onPresence: blockPresenceHandler, onAccount: accountHandler, onPayment: paymentHandler, + onPeerJoined: peerAddedHandler, + onPeerDeparted: peerDepartedHandler, ) return self diff --git a/codex/blockexchange/engine/payments.nim b/codex/blockexchange/engine/payments.nim index 260a3005..45259762 100644 --- a/codex/blockexchange/engine/payments.nim +++ b/codex/blockexchange/engine/payments.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, 
version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index 80c88527..f843870f 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -34,7 +34,7 @@ declareGauge( const DefaultBlockRetries* = 3000 - DefaultRetryInterval* = 500.millis + DefaultRetryInterval* = 2.seconds type RetriesExhaustedError* = object of CatchableError @@ -42,7 +42,7 @@ type BlockReq* = object handle*: BlockHandle - inFlight*: bool + requested*: ?PeerId blockRetries*: int startTime*: int64 @@ -50,12 +50,13 @@ type blockRetries*: int = DefaultBlockRetries retryInterval*: Duration = DefaultRetryInterval blocks*: Table[BlockAddress, BlockReq] # pending Block requests + lastInclusion*: Moment # time at which we last included a block into our wantlist proc updatePendingBlockGauge(p: PendingBlocksManager) = codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) proc getWantHandle*( - self: PendingBlocksManager, address: BlockAddress, inFlight = false + self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none ): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ## Add an event for a block ## @@ -65,11 +66,13 @@ proc getWantHandle*( do: let blk = BlockReq( handle: newFuture[Block]("pendingBlocks.getWantHandle"), - inFlight: inFlight, + requested: requested, blockRetries: self.blockRetries, startTime: getMonoTime().ticks, ) self.blocks[address] = blk + self.lastInclusion = Moment.now() + let handle = blk.handle proc cleanUpBlock(data: pointer) {.raises: [].} = @@ -86,9 +89,9 @@ proc getWantHandle*( return handle proc getWantHandle*( - self: PendingBlocksManager, cid: Cid, inFlight = false + self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none ): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = - self.getWantHandle(BlockAddress.init(cid), inFlight) + self.getWantHandle(BlockAddress.init(cid), requested) proc completeWantHandle*( self: PendingBlocksManager, address: BlockAddress, blk: Block @@ -121,9 +124,6 @@ proc resolve*( blockReq.handle.complete(bd.blk) codex_block_exchange_retrieval_time_us.set(retrievalDurationUs) - - if retrievalDurationUs > 500000: - warn "High block retrieval time", retrievalDurationUs, address = bd.address else: trace "Block handle already finished", address = bd.address @@ -141,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool self.blocks.withValue(address, pending): result = pending[].blockRetries <= 0 -func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) = - ## Set inflight status for a block +func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool = + ## Check if a block has been requested to a peer + ## + result = false + self.blocks.withValue(address, pending): + result = pending[].requested.isSome + +func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId = + ## Returns the peer that requested this block + ## + result = PeerId.none + self.blocks.withValue(address, pending): + result = pending[].requested + +proc markRequested*( + self: PendingBlocksManager, address: BlockAddress, peer: PeerId 
+): bool = + ## Marks this block as having been requested to a peer ## - self.blocks.withValue(address, pending): - pending[].inFlight = inFlight - -func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool = - ## Check if a block is in flight - ## + if self.isRequested(address): + return false self.blocks.withValue(address, pending): - result = pending[].inFlight + pending[].requested = peer.some + return true + +proc clearRequest*( + self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none +) = + self.blocks.withValue(address, pending): + if peer.isSome: + assert peer == pending[].requested + pending[].requested = PeerId.none func contains*(self: PendingBlocksManager, cid: Cid): bool = BlockAddress.init(cid) in self.blocks diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index d4754110..4c6ca41a 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -35,15 +35,14 @@ const DefaultMaxInflight* = 100 type - WantListHandler* = - proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).} + WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).} BlocksDeliveryHandler* = - proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).} + proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).} BlockPresenceHandler* = - proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).} - AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).} - PaymentHandler* = - proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).} + proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).} + AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).} + PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).} + PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).} BlockExcHandlers* = object onWantList*: WantListHandler @@ -51,6 +50,9 @@ type onPresence*: BlockPresenceHandler onAccount*: AccountHandler onPayment*: PaymentHandler + onPeerJoined*: PeerEventHandler + onPeerDeparted*: PeerEventHandler + onPeerDropped*: PeerEventHandler WantListSender* = proc( id: PeerId, @@ -240,96 +242,116 @@ proc handlePayment( await network.handlers.onPayment(peer.id, payment) proc rpcHandler( - b: BlockExcNetwork, peer: NetworkPeer, msg: Message + self: BlockExcNetwork, peer: NetworkPeer, msg: Message ) {.async: (raises: []).} = ## handle rpc messages ## if msg.wantList.entries.len > 0: - b.trackedFutures.track(b.handleWantList(peer, msg.wantList)) + self.trackedFutures.track(self.handleWantList(peer, msg.wantList)) if msg.payload.len > 0: - b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload)) + self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload)) if msg.blockPresences.len > 0: - b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences)) + self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences)) if account =? Account.init(msg.account): - b.trackedFutures.track(b.handleAccount(peer, account)) + self.trackedFutures.track(self.handleAccount(peer, account)) if payment =? 
SignedState.init(msg.payment): - b.trackedFutures.track(b.handlePayment(peer, payment)) + self.trackedFutures.track(self.handlePayment(peer, payment)) -proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = +proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer = ## Creates or retrieves a BlockExcNetwork Peer ## - if peer in b.peers: - return b.peers.getOrDefault(peer, nil) + if peer in self.peers: + return self.peers.getOrDefault(peer, nil) var getConn: ConnProvider = proc(): Future[Connection] {. async: (raises: [CancelledError]) .} = try: trace "Getting new connection stream", peer - return await b.switch.dial(peer, Codec) + return await self.switch.dial(peer, Codec) except CancelledError as error: raise error except CatchableError as exc: trace "Unable to connect to blockexc peer", exc = exc.msg - if not isNil(b.getConn): - getConn = b.getConn + if not isNil(self.getConn): + getConn = self.getConn let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} = - await b.rpcHandler(p, msg) + await self.rpcHandler(p, msg) # create new pubsub peer let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler) debug "Created new blockexc peer", peer - b.peers[peer] = blockExcPeer + self.peers[peer] = blockExcPeer return blockExcPeer -proc setupPeer*(b: BlockExcNetwork, peer: PeerId) = - ## Perform initial setup, such as want - ## list exchange - ## - - discard b.getOrCreatePeer(peer) - -proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} = +proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} = ## Dial a peer ## - if b.isSelf(peer.peerId): + if self.isSelf(peer.peerId): trace "Skipping dialing self", peer = peer.peerId return - if peer.peerId in b.peers: + if peer.peerId in self.peers: trace "Already connected to peer", peer = peer.peerId return - await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) + await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) -proc dropPeer*(b: BlockExcNetwork, peer: PeerId) = +proc dropPeer*( + self: BlockExcNetwork, peer: PeerId +) {.async: (raises: [CancelledError]).} = + trace "Dropping peer", peer + + try: + if not self.switch.isNil: + await self.switch.disconnect(peer) + except CatchableError as error: + warn "Error attempting to disconnect from peer", peer = peer, error = error.msg + + if not self.handlers.onPeerDropped.isNil: + await self.handlers.onPeerDropped(peer) + +proc handlePeerJoined*( + self: BlockExcNetwork, peer: PeerId +) {.async: (raises: [CancelledError]).} = + discard self.getOrCreatePeer(peer) + if not self.handlers.onPeerJoined.isNil: + await self.handlers.onPeerJoined(peer) + +proc handlePeerDeparted*( + self: BlockExcNetwork, peer: PeerId +) {.async: (raises: [CancelledError]).} = ## Cleanup disconnected peer ## - trace "Dropping peer", peer - b.peers.del(peer) + trace "Cleaning up departed peer", peer + self.peers.del(peer) + if not self.handlers.onPeerDeparted.isNil: + await self.handlers.onPeerDeparted(peer) -method init*(self: BlockExcNetwork) = +method init*(self: BlockExcNetwork) {.raises: [].} = ## Perform protocol initialization ## proc peerEventHandler( peerId: PeerId, event: PeerEvent - ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = + ): Future[void] {.async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: - self.setupPeer(peerId) + await self.handlePeerJoined(peerId) + elif event.kind == PeerEventKind.Left: + await self.handlePeerDeparted(peerId) else: - self.dropPeer(peerId) + warn 
"Unknown peer event", event self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 66c39294..927303a6 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -24,10 +24,9 @@ logScope: const DefaultYieldInterval = 50.millis type - ConnProvider* = - proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).} + ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).} - RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} + RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).} NetworkPeer* = ref object of RootObj id*: PeerId @@ -65,7 +64,9 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} = except CatchableError as err: warn "Exception in blockexc read loop", msg = err.msg finally: - trace "Detaching read loop", peer = self.id, connId = conn.oid + warn "Detaching read loop", peer = self.id, connId = conn.oid + if self.sendConn == conn: + self.sendConn = nil await conn.close() proc connect*( @@ -89,7 +90,12 @@ proc send*( return trace "Sending message", peer = self.id, connId = conn.oid - await conn.writeLp(protobufEncode(msg)) + try: + await conn.writeLp(protobufEncode(msg)) + except CatchableError as err: + if self.sendConn == conn: + self.sendConn = nil + raise newException(LPStreamError, "Failed to send message: " & err.msg) func new*( T: type NetworkPeer, diff --git a/codex/blockexchange/peers/peercontext.nim b/codex/blockexchange/peers/peercontext.nim index 7a299b6b..a02e8a89 100644 --- a/codex/blockexchange/peers/peercontext.nim +++ b/codex/blockexchange/peers/peercontext.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -25,28 +25,77 @@ import ../../logutils export payments, nitro +const + MinRefreshInterval = 1.seconds + MaxRefreshBackoff = 36 # 36 seconds + MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message + type BlockExcPeerCtx* = ref object of RootObj id*: PeerId blocks*: Table[BlockAddress, Presence] # remote peer have list including price - peerWants*: seq[WantListEntry] # remote peers want lists + wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants exchanged*: int # times peer has exchanged with us - lastExchange*: Moment # last time peer has exchanged with us + refreshInProgress*: bool # indicates if a refresh is in progress + lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has + refreshBackoff*: int = 1 # backoff factor for refresh requests account*: ?Account # ethereum account of this peer paymentChannel*: ?ChannelId # payment channel id + blocksSent*: HashSet[BlockAddress] # blocks sent to peer + blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer + lastExchange*: Moment # last time peer has sent us a block + activityTimeout*: Duration + lastSentWants*: HashSet[BlockAddress] + # track what wantList we last sent for delta updates -proc peerHave*(self: 
BlockExcPeerCtx): seq[BlockAddress] =
- toSeq(self.blocks.keys)
+proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
+ let staleness =
+ self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()

-proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
- self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
+ if staleness and self.refreshInProgress:
+ trace "Cleaning up refresh state", peer = self.id
+ self.refreshInProgress = false
+ self.refreshBackoff = 1

-proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
- self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
+ staleness
+
+proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
+ address in self.blocksSent
+
+proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
+ self.blocksSent.incl(address)
+
+proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
+ self.blocksSent.excl(address)
+
+proc refreshRequested*(self: BlockExcPeerCtx) =
+ trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
+ self.refreshInProgress = true
+ self.lastRefresh = Moment.now()
+
+proc refreshReplied*(self: BlockExcPeerCtx) =
+ self.refreshInProgress = false
+ self.lastRefresh = Moment.now()
+ self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
+
+proc havesUpdated(self: BlockExcPeerCtx) =
+ self.refreshBackoff = 1
+
+proc wantsUpdated*(self: BlockExcPeerCtx) =
+ self.refreshBackoff = 1
+
+proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
+ # XXX: this is ugly and inefficient, but since those will typically
+ # be used in "joins", it's better to pay the price here and have
+ # a linear join than to not do it and have a quadratic join.
+ toHashSet(self.blocks.keys.toSeq)

proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
 address in self.blocks

func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
+ if presence.address notin self.blocks:
+ self.havesUpdated()
+
 self.blocks[presence.address] = presence

func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
 price += precense[].price

 price
+
+proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
+ ## Adds a block to the set of blocks that have been requested to this peer
+ ## (its request schedule).
+ if self.blocksRequested.len == 0:
+ self.lastExchange = Moment.now()
+ self.blocksRequested.incl(address)
+
+proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
+ ## Removes a block from the set of blocks that have been requested to this peer
+ ## (its request schedule).
+ self.blocksRequested.excl(address)
+
+proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
+ let wasRequested = address in self.blocksRequested
+ self.blocksRequested.excl(address)
+ self.lastExchange = Moment.now()
+ wasRequested
+
+proc activityTimer*(
+ self: BlockExcPeerCtx
+): Future[void] {.async: (raises: [CancelledError]).} =
+ ## This is called by the block exchange when a block is scheduled for this peer.
+ ## If the peer sends no blocks for a while, it is considered inactive/uncooperative
+ ## and the peer is dropped. Note that ANY block that the peer sends will reset this
+ ## timer for all blocks.
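+ ## The returned future completes only once the peer has been idle for longer
+ ## than its activity timeout; the caller cancels it as soon as the block
+ ## arrives (see `downloadInternal`).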
+ ## + while true: + let idleTime = Moment.now() - self.lastExchange + if idleTime > self.activityTimeout: + return + + await sleepAsync(self.activityTimeout - idleTime) diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index ce2506a8..853bd33f 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -62,21 +62,23 @@ func len*(self: PeerCtxStore): int = self.peers.len func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address)) + toSeq(self.peers.values).filterIt(address in it.peerHave) func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = + # FIXME: this is way slower and can end up leading to unexpected performance loss. toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid)) func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address)) + toSeq(self.peers.values).filterIt(address in it.wantedBlocks) func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) + # FIXME: this is way slower and can end up leading to unexpected performance loss. + toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid)) proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = var res: PeersForBlock = (@[], @[]) for peer in self: - if peer.peerHave.anyIt(it == address): + if address in peer: res.with.add(peer) else: res.without.add(peer) diff --git a/codex/blockexchange/protobuf/blockexc.nim b/codex/blockexchange/protobuf/blockexc.nim index 69868681..9b1144ba 100644 --- a/codex/blockexchange/protobuf/blockexc.nim +++ b/codex/blockexchange/protobuf/blockexc.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,7 +9,6 @@ import std/hashes import std/sequtils -import pkg/stew/endians2 import message @@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry export BlockDelivery, BlockPresenceType, BlockPresence export AccountMessage, StateChannelUpdate -proc hash*(a: BlockAddress): Hash = - if a.leaf: - let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE) - hash(data) - else: - hash(a.cid.data.buffer) - proc hash*(e: WantListEntry): Hash = hash(e.address) diff --git a/codex/blockexchange/protobuf/message.nim b/codex/blockexchange/protobuf/message.nim index 4db89729..03c9dd00 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -1,4 +1,4 @@ -# Protocol of data exchange between Codex nodes +# Protocol of data exchange between Logos Storage nodes # and Protobuf encoder/decoder for these messages. # # Eventually all this code should be auto-generated from message.proto. 
@@ -25,11 +25,15 @@ type WantListEntry* = object address*: BlockAddress + # XXX: I think explicit priority is pointless as the peer will request + # the blocks in the order it wants to receive them, and all we have to + # do is process those in the same order as we send them back. It also + # complicates things for no reason at the moment, as the priority is + # always set to 0. priority*: int32 # The priority (normalized). default to 1 cancel*: bool # Whether this revokes an entry wantType*: WantType # Note: defaults to enum 0, ie Block sendDontHave*: bool # Note: defaults to false - inFlight*: bool # Whether block sending is in progress. Not serialized. WantList* = object entries*: seq[WantListEntry] # A list of wantList entries diff --git a/codex/blockexchange/protobuf/message.proto b/codex/blockexchange/protobuf/message.proto index 65d1f9a9..ee24c797 100644 --- a/codex/blockexchange/protobuf/message.proto +++ b/codex/blockexchange/protobuf/message.proto @@ -1,4 +1,4 @@ -// Protocol of data exchange between Codex nodes. +// Protocol of data exchange between Logos Storage nodes. // Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md syntax = "proto3"; diff --git a/codex/blocktype.nim b/codex/blocktype.nim index 7e13493d..b5a1019c 100644 --- a/codex/blocktype.nim +++ b/codex/blocktype.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,16 +9,14 @@ import std/tables import std/sugar +import std/hashes export tables -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/libp2p/[cid, multicodec, multihash] -import pkg/stew/byteutils +import pkg/stew/[byteutils, endians2] import pkg/questionable import pkg/questionable/results @@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string = else: "cid: " & $a.cid +proc hash*(a: BlockAddress): Hash = + if a.leaf: + let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE) + hash(data) + else: + hash(a.cid.data.buffer) + proc cidOrTreeCid*(a: BlockAddress): Cid = if a.leaf: a.treeCid else: a.cid diff --git a/codex/chunker.nim b/codex/chunker.nim index 908dd0c0..75758bc4 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,10 +9,7 @@ # TODO: This is super inneficient and needs a rewrite, but it'll do for now -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/questionable import pkg/questionable/results @@ -31,7 +28,7 @@ type ChunkerError* = object of CatchableError ChunkBuffer* = ptr UncheckedArray[byte] Reader* = proc(data: ChunkBuffer, len: int): Future[int] {. 
- gcsafe, async: (raises: [ChunkerError, CancelledError]) + async: (raises: [ChunkerError, CancelledError]) .} # Reader that splits input data into fixed-size chunks @@ -77,7 +74,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = + ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} = var res = 0 try: while res < len: @@ -105,7 +102,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = + ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} = var total = 0 try: while total < len: diff --git a/codex/clock.nim b/codex/clock.nim index c02e04aa..c0867afa 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -1,6 +1,7 @@ +{.push raises: [].} + import pkg/chronos import pkg/stew/endians2 -import pkg/upraises import pkg/stint type @@ -8,10 +9,12 @@ type SecondsSince1970* = int64 Timeout* = object of CatchableError -method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} = +method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} = raiseAssert "not implemented" -method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} = +method waitUntil*( + clock: Clock, time: SecondsSince1970 +) {.base, async: (raises: [CancelledError]).} = raiseAssert "not implemented" method start*(clock: Clock) {.base, async.} = diff --git a/codex/codex.nim b/codex/codex.nim index 81357464..3c28ec01 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -12,6 +12,7 @@ import std/strutils import std/os import std/tables import std/cpuinfo +import std/net import pkg/chronos import pkg/taskpools @@ -21,7 +22,6 @@ import pkg/confutils import pkg/confutils/defs import pkg/nitro import pkg/stew/io2 -import pkg/stew/shims/net as stewnet import pkg/datastore import pkg/ethers except Rng import pkg/stew/io2 @@ -57,10 +57,20 @@ type repoStore: RepoStore maintenance: BlockMaintainer taskpool: Taskpool + isStarted: bool CodexPrivateKey* = libp2p.PrivateKey # alias EthWallet = ethers.Wallet +func config*(self: CodexServer): CodexConf = + return self.config + +func node*(self: CodexServer): CodexNodeRef = + return self.codexNode + +func repoStore*(self: CodexServer): RepoStore = + return self.repoStore + proc waitForSync(provider: Provider): Future[void] {.async.} = var sleepTime = 1 trace "Checking sync state of Ethereum provider..." @@ -128,7 +138,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = # This is used for simulation purposes. Normal nodes won't be compiled with this flag # and hence the proof failure will always be 0. - when codex_enable_proof_failures: + when storage_enable_proof_failures: let proofFailures = config.simulateProofFailures if proofFailures > 0: warn "Enabling proof failure simulation!" 
@@ -159,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = s.codexNode.contracts = (client, host, validator) proc start*(s: CodexServer) {.async.} = - trace "Starting codex node", config = $s.config + if s.isStarted: + warn "Storage server already started, skipping" + return + trace "Starting Storage node", config = $s.config await s.repoStore.start() + s.maintenance.start() await s.codexNode.switch.start() @@ -175,27 +189,55 @@ proc start*(s: CodexServer) {.async.} = await s.bootstrapInteractions() await s.codexNode.start() - s.restServer.start() + + if s.restServer != nil: + s.restServer.start() + + s.isStarted = true proc stop*(s: CodexServer) {.async.} = - notice "Stopping codex node" + if not s.isStarted: + warn "Storage is not started" + return - let res = await noCancel allFinishedFailed[void]( + notice "Stopping Storage node" + + var futures = @[ - s.restServer.stop(), s.codexNode.switch.stop(), s.codexNode.stop(), s.repoStore.stop(), s.maintenance.stop(), ] - ) + + if s.restServer != nil: + futures.add(s.restServer.stop()) + + let res = await noCancel allFinishedFailed[void](futures) if res.failure.len > 0: - error "Failed to stop codex node", failures = res.failure.len - raiseAssert "Failed to stop codex node" + error "Failed to stop Storage node", failures = res.failure.len + raiseAssert "Failed to stop Storage node" + +proc close*(s: CodexServer) {.async.} = + var futures = @[s.codexNode.close(), s.repoStore.close()] + + let res = await noCancel allFinishedFailed[void](futures) if not s.taskpool.isNil: - s.taskpool.shutdown() + try: + s.taskpool.shutdown() + except Exception as exc: + error "Failed to stop the taskpool", failures = res.failure.len + raiseAssert("Failure in taskpool shutdown:" & exc.msg) + + if res.failure.len > 0: + error "Failed to close Storage node", failures = res.failure.len + raiseAssert "Failed to close Storage node" + +proc shutdown*(server: CodexServer) {.async.} = + await server.stop() + await server.close() proc new*( T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey @@ -211,21 +253,21 @@ proc new*( .withMaxConnections(config.maxPeers) .withAgentVersion(config.agentString) .withSignedPeerRecord(true) - .withTcpTransport({ServerFlags.ReuseAddr}) + .withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay}) .build() var cache: CacheStore = nil - taskpool: Taskpool + taskPool: Taskpool try: if config.numThreads == ThreadCount(0): - taskpool = Taskpool.new(numThreads = min(countProcessors(), 16)) + taskPool = Taskpool.new(numThreads = min(countProcessors(), 16)) else: - taskpool = Taskpool.new(numThreads = int(config.numThreads)) - info "Threadpool started", numThreads = taskpool.numThreads + taskPool = Taskpool.new(numThreads = int(config.numThreads)) + info "Threadpool started", numThreads = taskPool.numThreads except CatchableError as exc: - raiseAssert("Failure in taskpool initialization:" & exc.msg) + raiseAssert("Failure in taskPool initialization:" & exc.msg) if config.cacheSize > 0'nb: cache = CacheStore.new(cacheSize = config.cacheSize) @@ -295,7 +337,7 @@ proc new*( ) peerStore = PeerCtxStore.new() - pendingBlocks = PendingBlocksManager.new() + pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries) advertiser = Advertiser.new(repoStore, discovery) blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) @@ -307,7 +349,7 @@ proc new*( if config.prover: let backend = config.initializeBackend().expect("Unable to create prover backend.") - 
some Prover.new(store, backend, config.numProofSamples) + some Prover.new(store, backend, config.numProofSamples, taskPool) else: none Prover @@ -317,13 +359,16 @@ proc new*( engine = engine, discovery = discovery, prover = prover, - taskPool = taskpool, + taskPool = taskPool, ) + var restServer: RestServerRef = nil + + if config.apiBindAddress.isSome: restServer = RestServerRef .new( codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), - initTAddress(config.apiBindAddress, config.apiPort), + initTAddress(config.apiBindAddress.get(), config.apiPort), bufferSize = (1024 * 64), maxRequestBodySize = int.high, ) @@ -337,5 +382,5 @@ proc new*( restServer: restServer, repoStore: repoStore, maintenance: maintenance, - taskpool: taskpool, + taskPool: taskPool, ) diff --git a/codex/codextypes.nim b/codex/codextypes.nim index 274b9be0..67e90242 100644 --- a/codex/codextypes.nim +++ b/codex/codextypes.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/conf.nim b/codex/conf.nim index 77ef96ca..bc8b5506 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -16,8 +16,10 @@ import std/terminal # Is not used in tests {.pop.} import std/options +import std/parseutils import std/strutils import std/typetraits +import std/net import pkg/chronos import pkg/chronicles/helpers @@ -27,13 +29,12 @@ import pkg/confutils/std/net import pkg/toml_serialization import pkg/metrics import pkg/metrics/chronos_httpserver -import pkg/stew/shims/net as stewnet -import pkg/stew/shims/parseutils import pkg/stew/byteutils import pkg/libp2p import pkg/ethers import pkg/questionable import pkg/questionable/results +import pkg/stew/base64 import ./codextypes import ./discovery @@ -46,13 +47,14 @@ import ./utils/natutils from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas from ./validationconfig import MaxSlots, ValidationGroups +from ./blockexchange/engine/pendingblocks import DefaultBlockRetries export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export ValidationGroups, MaxSlots export DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, - DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas + DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries type ThreadCount* = distinct Natural @@ -61,18 +63,18 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.} proc defaultDataDir*(): string = let dataDir = when defined(windows): - "AppData" / "Roaming" / "Codex" + "AppData" / "Roaming" / "Storage" elif defined(macosx): - "Library" / "Application Support" / "Codex" + "Library" / "Application Support" / "Storage" else: - ".cache" / "codex" + ".cache" / "storage" getHomeDir() / dataDir const - codex_enable_api_debug_peers* {.booldefine.} = false - codex_enable_proof_failures* {.booldefine.} = false - codex_enable_log_counter* {.booldefine.} = false + storage_enable_api_debug_peers* {.booldefine.} = false + storage_enable_proof_failures* {.booldefine.} = false + storage_enable_log_counter* {.booldefine.} = false DefaultThreadCount* = ThreadCount(0) @@ -135,7 +137,7 @@ type .}: Port dataDir* {. 
- desc: "The directory where codex will store configuration and data", + desc: "The directory where Storage will store configuration and data", defaultValue: defaultDataDir(), defaultValueDesc: "", abbr: "d", @@ -196,14 +198,16 @@ type .}: ThreadCount agentString* {. - defaultValue: "Codex", + defaultValue: "Logos Storage", desc: "Node agent string which is used as identifier in network", name: "agent-string" .}: string apiBindAddress* {. - desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr" - .}: string + desc: "The REST API bind address", + defaultValue: "127.0.0.1".some, + name: "api-bindaddr" + .}: Option[string] apiPort* {. desc: "The REST Api port", @@ -261,6 +265,13 @@ type name: "block-mn" .}: int + blockRetries* {. + desc: "Number of times to retry fetching a block before giving up", + defaultValue: DefaultBlockRetries, + defaultValueDesc: $DefaultBlockRetries, + name: "block-retries" + .}: int + cacheSize* {. desc: "The size of the block cache, 0 disables the cache - " & @@ -380,7 +391,7 @@ type case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd of PersistenceCmd.prover: circuitDir* {. - desc: "Directory where Codex will store proof circuit data", + desc: "Directory where Storage will store proof circuit data", defaultValue: defaultDataDir() / "circuits", defaultValueDesc: "data/circuits", abbr: "cd", @@ -474,7 +485,7 @@ func prover*(self: CodexConf): bool = self.persistence and self.persistenceCmd == PersistenceCmd.prover proc getCodexVersion(): string = - let tag = strip(staticExec("git tag")) + let tag = strip(staticExec("git describe --tags --abbrev=0")) if tag.isEmptyOrWhitespace: return "untagged build" return tag @@ -485,7 +496,8 @@ proc getCodexRevision(): string = return res proc getCodexContractsRevision(): string = - let res = strip(staticExec("git rev-parse --short HEAD:vendor/codex-contracts-eth")) + let res = + strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth")) return res proc getNimBanner(): string = @@ -498,67 +510,85 @@ const nimBanner* = getNimBanner() codexFullVersion* = - "Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" & - "Codex contracts revision: " & codexContractsRevision & "\p" & nimBanner + "Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision & + "\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner proc parseCmdArg*( T: typedesc[MultiAddress], input: string -): MultiAddress {.upraises: [ValueError].} = +): MultiAddress {.raises: [ValueError].} = var ma: MultiAddress try: let res = MultiAddress.init(input) if res.isOk: ma = res.get() else: - warn "Invalid MultiAddress", input = input, error = res.error() + fatal "Invalid MultiAddress", input = input, error = res.error() quit QuitFailure except LPError as exc: - warn "Invalid MultiAddress uri", uri = input, error = exc.msg + fatal "Invalid MultiAddress uri", uri = input, error = exc.msg quit QuitFailure ma -proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = - let count = parseInt(input) - if count != 0 and count < 2: - warn "Invalid number of threads", input = input - quit QuitFailure - ThreadCount(count) +proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] = + try: + let count = parseInt(p) + if count != 0 and count < 2: + return err("Invalid number of threads: " & p) + return ok(ThreadCount(count)) + except ValueError as e: + return err("Invalid number of threads: " & p & ", 
error=" & e.msg) -proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = +proc parseCmdArg*(T: type ThreadCount, input: string): T = + let val = ThreadCount.parse(input) + if val.isErr: + fatal "Cannot parse the thread count.", input = input, error = val.error() + quit QuitFailure + return val.get() + +proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] = var res: SignedPeerRecord try: - if not res.fromURI(uri): - warn "Invalid SignedPeerRecord uri", uri = uri - quit QuitFailure - except LPError as exc: - warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg - quit QuitFailure - except CatchableError as exc: - warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg - quit QuitFailure - res + if not res.fromURI(p): + return err("The uri is not a valid SignedPeerRecord: " & p) + return ok(res) + except LPError, Base64Error: + let e = getCurrentException() + return err(e.msg) -func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} = +proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = + let res = SignedPeerRecord.parse(uri) + if res.isErr: + fatal "Cannot parse the signed peer record.", error = res.error(), input = uri + quit QuitFailure + return res.get() + +func parse*(T: type NatConfig, p: string): Result[NatConfig, string] = case p.toLowerAscii of "any": - NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) + return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)) of "none": - NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) + return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)) of "upnp": - NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) + return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)) of "pmp": - NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) + return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)) else: if p.startsWith("extip:"): try: let ip = parseIpAddress(p[6 ..^ 1]) - NatConfig(hasExtIp: true, extIp: ip) + return ok(NatConfig(hasExtIp: true, extIp: ip)) except ValueError: let error = "Not a valid IP address: " & p[6 ..^ 1] - raise newException(ValueError, error) + return err(error) else: - let error = "Not a valid NAT option: " & p - raise newException(ValueError, error) + return err("Not a valid NAT option: " & p) + +proc parseCmdArg*(T: type NatConfig, p: string): T = + let res = NatConfig.parse(p) + if res.isErr: + fatal "Cannot parse the NAT config.", error = res.error(), input = p + quit QuitFailure + return res.get() proc completeCmdArg*(T: type NatConfig, val: string): seq[string] = return @[] @@ -566,25 +596,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] = proc parseCmdArg*(T: type EthAddress, address: string): T = EthAddress.init($address).get() -proc parseCmdArg*(T: type NBytes, val: string): T = +func parse*(T: type NBytes, p: string): Result[NBytes, string] = var num = 0'i64 - let count = parseSize(val, num, alwaysBin = true) + let count = parseSize(p, num, alwaysBin = true) if count == 0: - warn "Invalid number of bytes", nbytes = val + return err("Invalid number of bytes: " & p) + return ok(NBytes(num)) + +proc parseCmdArg*(T: type NBytes, val: string): T = + let res = NBytes.parse(val) + if res.isErr: + fatal "Cannot parse NBytes.", error = res.error(), input = val quit QuitFailure - NBytes(num) + return res.get() proc parseCmdArg*(T: type Duration, val: string): T = var dur: Duration let count = parseDuration(val, dur) if count == 0: - warn "Cannot parse duration", dur = dur + fatal "Cannot parse duration", 
input = val quit QuitFailure dur proc readValue*( r: var TomlReader, val: var EthAddress -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = val = EthAddress.init(r.readValue(string)).get() proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = @@ -595,7 +631,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = try: val = SignedPeerRecord.parseCmdArg(uri) except LPError as err: - warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg + fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg quit QuitFailure proc readValue*(r: var TomlReader, val: var MultiAddress) = @@ -607,12 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) = if res.isOk: val = res.get() else: - warn "Invalid MultiAddress", input = input, error = res.error() + fatal "Invalid MultiAddress", input = input, error = res.error() quit QuitFailure proc readValue*( r: var TomlReader, val: var NBytes -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = var value = 0'i64 var str = r.readValue(string) let count = parseSize(str, value, alwaysBin = true) @@ -623,7 +659,7 @@ proc readValue*( proc readValue*( r: var TomlReader, val: var ThreadCount -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = var str = r.readValue(string) try: val = parseCmdArg(ThreadCount, str) @@ -632,7 +668,7 @@ proc readValue*( proc readValue*( r: var TomlReader, val: var Duration -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = var str = r.readValue(string) var dur: Duration let count = parseDuration(str, dur) @@ -699,7 +735,7 @@ proc stripAnsi*(v: string): string = res -proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = +proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} = # Updates log levels (without clearing old ones) let directives = logLevel.split(";") try: @@ -768,7 +804,7 @@ proc setupLogging*(conf: CodexConf) = of LogKind.None: noOutput - when codex_enable_log_counter: + when storage_enable_log_counter: var counter = 0.uint64 proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) = inc(counter) @@ -779,15 +815,6 @@ proc setupLogging*(conf: CodexConf) = else: defaultChroniclesStream.outputs[0].writer = writer - try: - updateLogLevel(conf.logLevel) - except ValueError as err: - try: - stderr.write "Invalid value for --log-level. " & err.msg & "\n" - except IOError: - echo "Invalid value for --log-level. " & err.msg - quit QuitFailure - proc setupMetrics*(config: CodexConf) = if config.metricsEnabled: let metricsAddress = config.metricsAddress diff --git a/codex/contentids_exts.nim b/codex/contentids_exts.nim new file mode 100644 index 00000000..9ef6fbb5 --- /dev/null +++ b/codex/contentids_exts.nim @@ -0,0 +1,8 @@ +const ContentIdsExts = [ + multiCodec("codex-root"), + multiCodec("codex-manifest"), + multiCodec("codex-block"), + multiCodec("codex-slot-root"), + multiCodec("codex-proving-root"), + multiCodec("codex-slot-cell"), +] diff --git a/codex/contracts/Readme.md b/codex/contracts/Readme.md index cae2a4cc..2a746863 100644 --- a/codex/contracts/Readme.md +++ b/codex/contracts/Readme.md @@ -1,13 +1,13 @@ -Codex Contracts in Nim +Logos Storage Contracts in Nim ======================= -Nim API for the [Codex smart contracts][1]. +Nim API for the [Logos Storage smart contracts][1]. 
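The config.nim hunks above all converge on one refactoring pattern: a side-effect-free `parse` that reports failures as a `Result`, wrapped by a thin `parseCmdArg` that logs a fatal error and quits. A minimal sketch of that pattern, using a hypothetical `Port` type that is not part of the patch:

```nim
import std/strutils
import pkg/results
import pkg/chronicles

type Port = distinct uint16

func parse(T: type Port, input: string): Result[Port, string] =
  ## Pure parsing: every failure is reported as a value, never as an exception.
  try:
    let v = parseInt(input)
    if v < 1 or v > 65535:
      return err("port out of range: " & input)
    ok(Port(v.uint16))
  except ValueError:
    err("not a number: " & input)

proc parseCmdArg(T: type Port, input: string): Port =
  ## CLI boundary: turn a parse failure into a fatal log and exit, mirroring
  ## the ThreadCount/SignedPeerRecord/NatConfig/NBytes wrappers above.
  let res = Port.parse(input)
  if res.isErr:
    fatal "Cannot parse the port.", input = input, error = res.error()
    quit QuitFailure
  res.get()
```

Keeping `parse` exception-free is what lets the TOML `readValue` hooks above reuse the same validation without duplicating the quit-on-error policy.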
Usage ----- For a global overview of the steps involved in starting and fulfilling a -storage contract, see [Codex Contracts][1]. +storage contract, see [Logos Storage Contracts][1]. Smart contract -------------- @@ -144,5 +144,5 @@ await storage .markProofAsMissing(id, period) ``` -[1]: https://github.com/status-im/codex-contracts-eth/ -[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md +[1]: https://github.com/logos-storage/logos-storage-contracts-eth/ +[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index b7863539..1d4f57ba 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -1,3 +1,5 @@ +{.push raises: [].} + import std/times import pkg/ethers import pkg/questionable @@ -72,7 +74,9 @@ method now*(clock: OnChainClock): SecondsSince1970 = doAssert clock.started, "clock should be started before calling now()" return toUnix(getTime() + clock.offset) -method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} = +method waitUntil*( + clock: OnChainClock, time: SecondsSince1970 +) {.async: (raises: [CancelledError]).} = while (let difference = time - clock.now(); difference > 0): clock.newBlock.clear() discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference)) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index f676012b..884441d4 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -1,7 +1,6 @@ import std/strformat import std/strutils import pkg/ethers -import pkg/upraises import pkg/questionable import pkg/lrucache import ../utils/exceptions @@ -436,7 +435,7 @@ method canReserveSlot*( method subscribeRequests*( market: OnChainMarket, callback: OnRequest ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} = + proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in Request subscription", msg = eventErr.msg return @@ -450,7 +449,7 @@ method subscribeRequests*( method subscribeSlotFilled*( market: OnChainMarket, callback: OnSlotFilled ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} = + proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in SlotFilled subscription", msg = eventErr.msg return @@ -477,7 +476,7 @@ method subscribeSlotFilled*( method subscribeSlotFreed*( market: OnChainMarket, callback: OnSlotFreed ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} = + proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in SlotFreed subscription", msg = eventErr.msg return @@ -491,7 +490,7 @@ method subscribeSlotFreed*( method subscribeSlotReservationsFull*( market: OnChainMarket, callback: OnSlotReservationsFull ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} = + proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} = without event =? 
eventResult, eventErr: error "There was an error in SlotReservationsFull subscription", msg = eventErr.msg @@ -506,7 +505,7 @@ method subscribeSlotReservationsFull*( method subscribeFulfillment( market: OnChainMarket, callback: OnFulfillment ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = + proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFulfillment subscription", msg = eventErr.msg return @@ -520,7 +519,7 @@ method subscribeFulfillment( method subscribeFulfillment( market: OnChainMarket, requestId: RequestId, callback: OnFulfillment ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = + proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFulfillment subscription", msg = eventErr.msg return @@ -535,7 +534,7 @@ method subscribeFulfillment( method subscribeRequestCancelled*( market: OnChainMarket, callback: OnRequestCancelled ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = + proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestCancelled subscription", msg = eventErr.msg return @@ -549,7 +548,7 @@ method subscribeRequestCancelled*( method subscribeRequestCancelled*( market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = + proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestCancelled subscription", msg = eventErr.msg return @@ -564,7 +563,7 @@ method subscribeRequestCancelled*( method subscribeRequestFailed*( market: OnChainMarket, callback: OnRequestFailed ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = + proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFailed subscription", msg = eventErr.msg return @@ -578,7 +577,7 @@ method subscribeRequestFailed*( method subscribeRequestFailed*( market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = + proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFailed subscription", msg = eventErr.msg return @@ -593,7 +592,7 @@ method subscribeRequestFailed*( method subscribeProofSubmission*( market: OnChainMarket, callback: OnProofSubmitted ): Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} = + proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} = without event =? 
eventResult, eventErr: error "There was an error in ProofSubmitted subscription", msg = eventErr.msg return diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 035e9648..f6f630e2 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -2,7 +2,7 @@ import std/hashes import std/sequtils import std/typetraits import pkg/contractabi -import pkg/nimcrypto +import pkg/nimcrypto/keccak import pkg/ethers/contracts/fields import pkg/questionable/results import pkg/stew/byteutils diff --git a/codex/discovery.nim b/codex/discovery.nim index 4a211c20..424ec9c0 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -10,13 +10,13 @@ {.push raises: [].} import std/algorithm +import std/net import std/sequtils import pkg/chronos import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope] import pkg/questionable import pkg/questionable/results -import pkg/stew/shims/net import pkg/contractabi/address as ca import pkg/codexdht/discv5/[routing_table, protocol as discv5] from pkg/nimcrypto import keccak256 @@ -43,6 +43,7 @@ type Discovery* = ref object of RootObj # record to advertice node connection information, this carry any # address that the node can be connected on dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information + isStarted: bool proc toNodeId*(cid: Cid): NodeId = ## Cid to discovery id @@ -157,7 +158,7 @@ method provide*( method removeProvider*( d: Discovery, peerId: PeerId -): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} = +): Future[void] {.base, async: (raises: [CancelledError]).} = ## Remove provider from providers table ## @@ -203,10 +204,15 @@ proc start*(d: Discovery) {.async: (raises: []).} = try: d.protocol.open() await d.protocol.start() + d.isStarted = true except CatchableError as exc: error "Error starting discovery", exc = exc.msg proc stop*(d: Discovery) {.async: (raises: []).} = + if not d.isStarted: + warn "Discovery not started, skipping stop" + return + try: await noCancel d.protocol.closeWait() except CatchableError as exc: diff --git a/codex/erasure.nim b/codex/erasure.nim index 5dfebcd4..f9310a40 100644 --- a/codex/erasure.nim +++ b/codex/erasure.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/erasure/backend.nim b/codex/erasure/backend.nim index 32009829..9885326f 100644 --- a/codex/erasure/backend.nim +++ b/codex/erasure/backend.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,10 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
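The hunk that follows is the first of many with the same shape: the `upraises` compatibility shim is dropped in favour of Nim's native exception tracking. `{.push raises: [], gcsafe.}` makes "raises nothing" the default for every routine that follows, so any exception that is not explicitly declared becomes a compile-time error. A small self-contained sketch of what the pragma enforces (hypothetical procs, not from the patch):

```nim
{.push raises: [], gcsafe.}

import std/strutils

proc parsePort(s: string): int {.raises: [ValueError].} =
  # `parseInt` can raise ValueError; it is declared above, so this compiles.
  parseInt(s)

proc defaultPort(): int =
  # No `raises` annotation here, so the pushed `raises: []` applies:
  # letting ValueError escape would be rejected by the compiler.
  try:
    parsePort("8080")
  except ValueError:
    8080

{.pop.}
```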
-import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import ../stores diff --git a/codex/erasure/backends/leopard.nim b/codex/erasure/backends/leopard.nim index a0016570..66bc059d 100644 --- a/codex/erasure/backends/leopard.nim +++ b/codex/erasure/backends/leopard.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index 95516500..c91968f9 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,10 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import std/[sugar, atomics, sequtils] @@ -428,7 +425,7 @@ proc encodeData( return failure("Unable to store block!") idx.inc(params.steps) - without tree =? CodexTree.init(cids[]), err: + without tree =? (await CodexTree.init(self.taskPool, cids[])), err: return failure(err) without treeCid =? tree.rootCid, err: @@ -649,7 +646,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err: return failure(err) - without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err: + without tree =? + (await CodexTree.init(self.taskPool, cids[0 ..< encoded.originalBlocksCount])), err: return failure(err) without treeCid =? tree.rootCid, err: @@ -680,7 +678,8 @@ proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} = without (cids, _) =? (await self.decodeInternal(encoded)), err: return failure(err) - without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err: + without tree =? + (await CodexTree.init(self.taskPool, cids[0 ..< encoded.originalBlocksCount])), err: return failure(err) without treeCid =? tree.rootCid, err: diff --git a/codex/errors.nim b/codex/errors.nim index 9ce52df7..655aec87 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/logutils.nim b/codex/logutils.nim index 0d10b0fb..f3b98548 100644 --- a/codex/logutils.nim +++ b/codex/logutils.nim @@ -11,7 +11,7 @@ ## 4. Remove usages of `nim-json-serialization` from the codebase ## 5. Remove need to declare `writeValue` for new types ## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent -## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467) +## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467) ## ## When declaring a new type, one should consider importing the `codex/logutils` ## module, and specifying `formatIt`. 
If textlines log output and json log output diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index 30e0c7ca..b899bb09 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,11 +9,9 @@ # This module implements serialization and deserialization of Manifest -import pkg/upraises import times -push: - {.upraises: [].} +{.push raises: [].} import std/tables import std/sequtils diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index cbb0bace..fadc8c88 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,10 +9,7 @@ # This module defines all operations on Manifest -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/libp2p/protobuf/minprotobuf import pkg/libp2p/[cid, multihash, multicodec] diff --git a/codex/market.nim b/codex/market.nim index 0fe69347..968f204e 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -1,5 +1,4 @@ import pkg/chronos -import pkg/upraises import pkg/questionable import pkg/ethers/erc20 import ./contracts/requests @@ -23,15 +22,15 @@ type ProofInvalidError* = object of MarketError Subscription* = ref object of RootObj OnRequest* = - proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} - OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} - OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} + proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].} + OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].} + OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].} + OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].} OnSlotReservationsFull* = - proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} - OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].} + proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].} + OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].} + OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].} + OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].} ProofChallenge* = array[32, byte] # Marketplace events -- located here due to the Market abstraction @@ -275,7 +274,7 @@ method subscribeProofSubmission*( ): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} = +method unsubscribe*(subscription: Subscription) {.base, async.} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( diff --git a/codex/merkletree/codex/coders.nim b/codex/merkletree/codex/coders.nim index 1d50707c..d81979c1 100644 --- a/codex/merkletree/codex/coders.nim +++ b/codex/merkletree/codex/coders.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status 
Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,10 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/libp2p import pkg/questionable diff --git a/codex/merkletree/codex/codex.nim b/codex/merkletree/codex/codex.nim index 0eec92e4..173befd0 100644 --- a/codex/merkletree/codex/codex.nim +++ b/codex/merkletree/codex/codex.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -10,16 +10,18 @@ {.push raises: [].} import std/bitops -import std/sequtils +import std/[atomics, sequtils] import pkg/questionable import pkg/questionable/results import pkg/libp2p/[cid, multicodec, multihash] import pkg/constantine/hashes +import pkg/taskpools +import pkg/chronos/threadsync import ../../utils import ../../rng import ../../errors -import ../../blocktype +import ../../codextypes from ../../utils/digest import digestBytes @@ -47,28 +49,6 @@ type CodexProof* = ref object of ByteProof mcodec*: MultiCodec -# CodeHashes is not exported from libp2p -# So we need to recreate it instead of -proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} = - for item in HashesList: - result[item.mcodec] = item - -const CodeHashes = initMultiHashCodeTable() - -func mhash*(mcodec: MultiCodec): ?!MHash = - let mhash = CodeHashes.getOrDefault(mcodec) - - if isNil(mhash.coder): - return failure "Invalid multihash codec" - - success mhash - -func digestSize*(self: (CodexTree or CodexProof)): int = - ## Number of leaves - ## - - self.mhash.size - func getProof*(self: CodexTree, index: int): ?!CodexProof = var proof = CodexProof(mcodec: self.mcodec) @@ -128,38 +108,47 @@ proc `$`*(self: CodexProof): string = "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " & $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )" -func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash = +func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash = ## Compress two hashes ## - - # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing - # See: https://github.com/codex-storage/nim-codex/issues/1162 - let input = @x & @y & @[key.byte] - var digest = hashes.sha256.hash(input) + let digest = ?MultiHash.digest(codec, input).mapFailure + success digest.digestBytes - success @digest - -func init*( - _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash] -): ?!CodexTree = +func initTree(mcodec: MultiCodec, leaves: openArray[ByteHash]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" let - mhash = ?mcodec.mhash() compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = - compress(x, y, key, mhash) - Zero: ByteHash = newSeq[byte](mhash.size) + compress(x, y, key, mcodec) + digestSize = ?mcodec.digestSize.mapFailure + Zero: ByteHash = newSeq[byte](digestSize) - if mhash.size != leaves[0].len: + if digestSize != leaves[0].len: return failure "Invalid hash length" - var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero) - - self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true) + var self = CodexTree(mcodec: mcodec) + 
?self.prepare(compressor, Zero, leaves) success self +func init*( + _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash] +): ?!CodexTree = + let tree = ?initTree(mcodec, leaves) + ?tree.compute() + success tree + +proc init*( + _: type CodexTree, + tp: Taskpool, + mcodec: MultiCodec = Sha256HashCodec, + leaves: seq[ByteHash], +): Future[?!CodexTree] {.async: (raises: [CancelledError]).} = + let tree = ?initTree(mcodec, leaves) + ?await tree.compute(tp) + success tree + func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" @@ -170,6 +159,18 @@ func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree = CodexTree.init(mcodec, leaves) +proc init*( + _: type CodexTree, tp: Taskpool, leaves: seq[MultiHash] +): Future[?!CodexTree] {.async: (raises: [CancelledError]).} = + if leaves.len == 0: + return failure "Empty leaves" + + let + mcodec = leaves[0].mcodec + leaves = leaves.mapIt(it.digestBytes) + + await CodexTree.init(tp, mcodec, leaves) + func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" @@ -180,6 +181,18 @@ func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree = CodexTree.init(mcodec, leaves) +proc init*( + _: type CodexTree, tp: Taskpool, leaves: seq[Cid] +): Future[?!CodexTree] {.async: (raises: [CancelledError]).} = + if leaves.len == 0: + return failure("Empty leaves") + + let + mcodec = (?leaves[0].mhash.mapFailure).mcodec + leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes) + + await CodexTree.init(tp, mcodec, leaves) + proc fromNodes*( _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, @@ -190,23 +203,16 @@ proc fromNodes*( return failure "Empty nodes" let - mhash = ?mcodec.mhash() - Zero = newSeq[byte](mhash.size) + digestSize = ?mcodec.digestSize.mapFailure + Zero = newSeq[byte](digestSize) compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = - compress(x, y, key, mhash) + compress(x, y, key, mcodec) - if mhash.size != nodes[0].len: + if digestSize != nodes[0].len: return failure "Invalid hash length" - var - self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec) - layer = nleaves - pos = 0 - - while pos < nodes.len: - self.layers.add(nodes[pos ..< (pos + layer)]) - pos += layer - layer = divUp(layer, 2) + var self = CodexTree(mcodec: mcodec) + ?self.fromNodes(compressor, Zero, nodes, nleaves) let index = Rng.instance.rand(nleaves - 1) @@ -228,10 +234,10 @@ func init*( return failure "Empty nodes" let - mhash = ?mcodec.mhash() - Zero = newSeq[byte](mhash.size) + digestSize = ?mcodec.digestSize.mapFailure + Zero = newSeq[byte](digestSize) compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} = - compress(x, y, key, mhash) + compress(x, y, key, mcodec) success CodexProof( compress: compressor, diff --git a/codex/merkletree/merkletree.nim b/codex/merkletree/merkletree.nim index f1905bec..56491d84 100644 --- a/codex/merkletree/merkletree.nim +++ b/codex/merkletree/merkletree.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,19 +9,58 @@ {.push raises: [].} -import std/bitops +import std/[bitops, atomics, sequtils] +import stew/assign2 import pkg/questionable/results +import pkg/taskpools +import pkg/chronos +import pkg/chronos/threadsync import 
../errors +import ../utils/sharedbuf + +export sharedbuf + +template nodeData( + data: openArray[byte], offsets: openArray[int], nodeSize, i, j: int +): openArray[byte] = + ## Bytes of the j'th entry of the i'th level in the tree, starting with the + ## leaves (at level 0). + let start = (offsets[i] + j) * nodeSize + data.toOpenArray(start, start + nodeSize - 1) type + # TODO hash functions don't fail - removing the ?! from this function would + # significantly simplify the flow below CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].} - MerkleTree*[H, K] = ref object of RootObj - layers*: seq[seq[H]] - compress*: CompressFn[H, K] - zero*: H + CompressData[H, K] = object + fn: CompressFn[H, K] + nodeSize: int + zero: H + + MerkleTreeObj*[H, K] = object of RootObj + store*: seq[byte] + ## Flattened merkle tree where hashes are assumed to be trivial bytes and + ## uniform in size. + ## + ## Each layer of the tree is stored serially starting with the leaves and + ## ending with the root. + ## + ## Because the tree might not be balanced, `layerOffsets` contains the + ## index of the starting point of each level, for easy lookup. + layerOffsets*: seq[int] + ## Starting point of each level in the tree, starting from the leaves - + ## multiplied by the entry size, this is the offset in the payload where + ## the entries of that level start + ## + ## For example, a tree with 4 leaves will have [0, 4, 6] stored here. + ## + ## See the nodesPerLevel function, from which this sequence is derived + compress*: CompressData[H, K] + + MerkleTree*[H, K] = ref MerkleTreeObj[H, K] MerkleProof*[H, K] = ref object of RootObj index*: int # linear index of the leaf, starting from 0 @@ -30,33 +69,99 @@ type compress*: CompressFn[H, K] # compress function zero*: H # zero value +func levels*[H, K](self: MerkleTree[H, K]): int = + return self.layerOffsets.len + func depth*[H, K](self: MerkleTree[H, K]): int = - return self.layers.len - 1 + return self.levels() - 1 + +func nodesInLayer(offsets: openArray[int], layer: int): int = + if layer == offsets.high: + 1 + else: + offsets[layer + 1] - offsets[layer] + +func nodesInLayer(self: MerkleTree | MerkleTreeObj, layer: int): int = + self.layerOffsets.nodesInLayer(layer) func leavesCount*[H, K](self: MerkleTree[H, K]): int = - return self.layers[0].len + return self.nodesInLayer(0) -func levels*[H, K](self: MerkleTree[H, K]): int = - return self.layers.len +func nodesPerLevel(nleaves: int): seq[int] = + ## Given a number of leaves, return a seq with the number of nodes at each + ## layer of the tree (from the bottom/leaves to the root) + ## + ## E.g. for a tree of 4 leaves, return `[4, 2, 1]` + if nleaves <= 0: + return @[] + elif nleaves == 1: + return @[1, 1] # leaf and root -func leaves*[H, K](self: MerkleTree[H, K]): seq[H] = - return self.layers[0] + var nodes: seq[int] = @[] + var m = nleaves + while true: + nodes.add(m) + if m == 1: + break + # Next layer size is ceil(m/2) + m = (m + 1) shr 1 -iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] = - for layer in self.layers: - yield layer + nodes + +func layerOffsets(nleaves: int): seq[int] = + ## Given a number of leaves, return a seq of the starting offsets of each + ## layer in the node store that results from flattening the binary tree + ## + ## E.g. for a tree of 4 leaves, return `[0, 4, 6]` + let nodes = nodesPerLevel(nleaves) + var tot = 0 + let offsets = nodes.mapIt: + let cur = tot + tot += it + cur + offsets + +template nodeData(self: MerkleTreeObj, i, j: int): openArray[byte] = + ## 
Bytes of the j'th node of the i'th level in the tree, starting with the + ## leaves (at level 0). + self.store.nodeData(self.layerOffsets, self.compress.nodeSize, i, j) + +func layer*[H, K]( + self: MerkleTree[H, K], layer: int +): seq[H] {.deprecated: "Expensive".} = + var nodes = newSeq[H](self.nodesInLayer(layer)) + for i, h in nodes.mpairs: + assign(h, self[].nodeData(layer, i)) + return nodes + +func leaves*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} = + self.layer(0) + +iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} = + for i in 0 ..< self.layerOffsets.len: + yield self.layer(i) + +proc layers*[H, K](self: MerkleTree[H, K]): seq[seq[H]] {.deprecated: "Expensive".} = + for l in self.layers(): + result.add l iterator nodes*[H, K](self: MerkleTree[H, K]): H = - for layer in self.layers: - for node in layer: + ## Iterate over the nodes of each layer starting with the leaves + var node: H + for i in 0 ..< self.layerOffsets.len: + let nodesInLayer = self.nodesInLayer(i) + for j in 0 ..< nodesInLayer: + assign(node, self[].nodeData(i, j)) yield node func root*[H, K](self: MerkleTree[H, K]): ?!H = - let last = self.layers[^1] - if last.len != 1: + mixin assign + if self.layerOffsets.len == 0: return failure "invalid tree" - return success last[0] + var h: H + assign(h, self[].nodeData(self.layerOffsets.high(), 0)) + return success h func getProof*[H, K]( self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K] @@ -72,18 +177,19 @@ func getProof*[H, K]( var m = nleaves for i in 0 ..< depth: let j = k xor 1 - path[i] = - if (j < m): - self.layers[i][j] - else: - self.zero + + if (j < m): + assign(path[i], self[].nodeData(i, j)) + else: + path[i] = self.compress.zero + k = k shr 1 m = (m + 1) shr 1 proof.index = index proof.path = path proof.nleaves = nleaves - proof.compress = self.compress + proof.compress = self.compress.fn success() @@ -122,32 +228,169 @@ func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H = func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool = success bool(root == ?proof.reconstructRoot(leaf)) -func merkleTreeWorker*[H, K]( - self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool -): ?!seq[seq[H]] = - let a = low(xs) - let b = high(xs) - let m = b - a + 1 +func fromNodes*[H, K]( + self: MerkleTree[H, K], + compressor: CompressFn, + zero: H, + nodes: openArray[H], + nleaves: int, +): ?!void = + mixin assign + + if nodes.len < 2: # At least leaf and root + return failure "Not enough nodes" + + if nleaves == 0: + return failure "No leaves" + + self.compress = CompressData[H, K](fn: compressor, nodeSize: nodes[0].len, zero: zero) + self.layerOffsets = layerOffsets(nleaves) + + if self.layerOffsets[^1] + 1 != nodes.len: + return failure "bad node count" + + self.store = newSeqUninit[byte](nodes.len * self.compress.nodeSize) + + for i in 0 ..< nodes.len: + assign( + self[].store.toOpenArray( + i * self.compress.nodeSize, (i + 1) * self.compress.nodeSize - 1 + ), + nodes[i], + ) + + success() + +func merkleTreeWorker[H, K]( + store: var openArray[byte], + offsets: openArray[int], + compress: CompressData[H, K], + layer: int, + isBottomLayer: static bool, +): ?!void = + ## Worker used to compute the merkle tree from the leaves that are assumed to + ## already be stored at the beginning of the `store`, as done by `prepare`. 
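The layout that this worker (and the `MerkleTreeObj` doc comments above) relies on is easiest to see with concrete numbers. A standalone sketch of the index arithmetic, restating `nodesPerLevel`/`layerOffsets` from this file in plain loop form for a 4-leaf tree with assumed 32-byte nodes:

```nim
func nodesPerLevel(nleaves: int): seq[int] =
  ## Mirror of the helper above: node counts per layer, leaves first.
  if nleaves <= 0:
    return @[]
  elif nleaves == 1:
    return @[1, 1] # leaf and root
  var m = nleaves
  while true:
    result.add(m)
    if m == 1:
      break
    m = (m + 1) shr 1 # next layer holds ceil(m/2) nodes

func layerOffsets(nleaves: int): seq[int] =
  ## Mirror of the helper above: where each layer starts in the flat store.
  var tot = 0
  for n in nodesPerLevel(nleaves):
    result.add(tot)
    tot += n

doAssert nodesPerLevel(4) == @[4, 2, 1] # 4 leaves, 2 inner nodes, 1 root
doAssert layerOffsets(4) == @[0, 4, 6]  # matches the `[0, 4, 6]` example above

# With nodeSize = 32, node j of layer i occupies
# store[(layerOffsets[i] + j) * 32 ..< (layerOffsets[i] + j + 1) * 32],
# so the root of the 4-leaf tree starts at byte offset 6 * 32 = 192.
```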
+ + # Throughout, we use `assign` to convert from H to bytes and back, assuming + # this assignment can be done somewhat efficiently (i.e. memcpy) - because + # the code must work with multihash where len(H) can differ, we cannot + # simply use a fixed-size array here. + mixin assign + + template nodeData(i, j: int): openArray[byte] = + # Pick out the bytes of node j in layer i + store.nodeData(offsets, compress.nodeSize, i, j) + + let m = offsets.nodesInLayer(layer) when not isBottomLayer: if m == 1: - return success @[@xs] + return success() let halfn: int = m div 2 let n: int = 2 * halfn let isOdd: bool = (n != m) - var ys: seq[H] - if not isOdd: - ys = newSeq[H](halfn) - else: - ys = newSeq[H](halfn + 1) + # Because the compression function works with H and not bytes, + # we need to extract H from the raw data - a little abstraction tax that + # ensures that properties like alignment of H are respected. + var a, b, tmp: H for i in 0 ..< halfn: const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone - ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key) + + assign(a, nodeData(layer, i * 2)) + assign(b, nodeData(layer, i * 2 + 1)) + + tmp = ?compress.fn(a, b, key = key) + + assign(nodeData(layer + 1, i), tmp) + if isOdd: const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd - ys[halfn] = ?self.compress(xs[n], self.zero, key = key) + assign(a, nodeData(layer, n)) + + tmp = ?compress.fn(a, compress.zero, key = key) + + assign(nodeData(layer + 1, halfn), tmp) + + merkleTreeWorker(store, offsets, compress, layer + 1, false) + +proc merkleTreeWorker[H, K]( + store: SharedBuf[byte], + offsets: SharedBuf[int], + compress: ptr CompressData[H, K], + signal: ThreadSignalPtr, +): bool = + defer: + discard signal.fireSync() + + let res = merkleTreeWorker( + store.toOpenArray(), offsets.toOpenArray(), compress[], 0, isBottomLayer = true + ) + + return res.isOk() + +func prepare*[H, K]( + self: MerkleTree[H, K], compressor: CompressFn, zero: H, leaves: openArray[H] +): ?!void = + ## Prepare the instance for computing the merkle tree of the given leaves using + ## the given compression function. After preparation, `compute` should be + ## called to perform the actual computation. `leaves` will be copied into the + ## tree so they can be freed after the call. + + if leaves.len == 0: + return failure "No leaves" + + self.compress = + CompressData[H, K](fn: compressor, nodeSize: leaves[0].len, zero: zero) + self.layerOffsets = layerOffsets(leaves.len) + + self.store = newSeqUninit[byte]((self.layerOffsets[^1] + 1) * self.compress.nodeSize) + + for j in 0 ..< leaves.len: + assign(self[].nodeData(0, j), leaves[j]) + + return success() + +proc compute*[H, K](self: MerkleTree[H, K]): ?!void = + merkleTreeWorker( + self.store, self.layerOffsets, self.compress, 0, isBottomLayer = true + ) + +proc compute*[H, K]( + self: MerkleTree[H, K], tp: Taskpool +): Future[?!void] {.async: (raises: []).} = + if tp.numThreads == 1: + # With a single thread, there's no point creating a separate task + return self.compute() + + # TODO this signal would benefit from reuse across computations + without signal =? 
ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + signal.close().expect("closing once works") + + let res = tp.spawn merkleTreeWorker( + SharedBuf.view(self.store), + SharedBuf.view(self.layerOffsets), + addr self.compress, + signal, + ) + + # To support cancellation, we'd have to ensure the task we posted to taskpools + # exits early - since we're not doing that, block cancellation attempts + try: + await noCancel signal.wait() + except AsyncError as exc: + # Since we initialized the signal, the OS or chronos is misbehaving. In any + # case, it would mean the task is still running which would cause a memory + # violation if we let it run - panic instead + raiseAssert "Could not wait for signal, was it initialized? " & exc.msg + + if not res.sync(): + return failure("merkle tree task failed") + + return success() diff --git a/codex/merkletree/poseidon2.nim b/codex/merkletree/poseidon2.nim index 56ad1e4d..436cd273 100644 --- a/codex/merkletree/poseidon2.nim +++ b/codex/merkletree/poseidon2.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,9 +9,11 @@ {.push raises: [].} -import std/sequtils +import std/[sequtils, atomics] import pkg/poseidon2 +import pkg/taskpools +import pkg/chronos/threadsync import pkg/constantine/math/io/io_fields import pkg/constantine/platforms/abstractions import pkg/questionable/results @@ -44,6 +46,17 @@ type Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum] Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum] +proc len*(v: Poseidon2Hash): int = + sizeof(v) + +proc assign*(v: var openArray[byte], h: Poseidon2Hash) = + doAssert v.len == sizeof(h) + copyMem(addr v[0], addr h, sizeof(h)) + +proc assign*(h: var Poseidon2Hash, v: openArray[byte]) = + doAssert v.len == sizeof(h) + copyMem(addr h, addr v[0], sizeof(h)) + proc `$`*(self: Poseidon2Tree): string = let root = if self.root.isOk: self.root.get.toHex else: "none" "Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & @@ -63,7 +76,7 @@ converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash = of KeyOdd: KeyOddF of KeyOddAndBottomLayer: KeyOddAndBottomLayerF -func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree = +proc initTree(leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree = if leaves.len == 0: return failure "Empty leaves" @@ -72,34 +85,43 @@ func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2 ): ?!Poseidon2Hash {.noSideEffect.} = success compress(x, y, key.toKey) - var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero) + var self = Poseidon2Tree() + ?self.prepare(compressor, Poseidon2Zero, leaves) + success self + +func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree = + let self = ?initTree(leaves) + ?self.compute() + + success self + +proc init*( + _: type Poseidon2Tree, tp: Taskpool, leaves: seq[Poseidon2Hash] +): Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} = + let self = ?initTree(leaves) + + ?await self.compute(tp) - self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true) success self func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree = Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it))) +proc init*( + _: type Poseidon2Tree, tp: Taskpool, leaves: seq[array[31, byte]] +): 
Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} = + await Poseidon2Tree.init(tp, leaves.mapIt(Poseidon2Hash.fromBytes(it))) + proc fromNodes*( _: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int ): ?!Poseidon2Tree = - if nodes.len == 0: - return failure "Empty nodes" - let compressor = proc( x, y: Poseidon2Hash, key: PoseidonKeysEnum ): ?!Poseidon2Hash {.noSideEffect.} = success compress(x, y, key.toKey) - var - self = Poseidon2Tree(compress: compressor, zero: zero) - layer = nleaves - pos = 0 - - while pos < nodes.len: - self.layers.add(nodes[pos ..< (pos + layer)]) - pos += layer - layer = divUp(layer, 2) + let self = Poseidon2Tree() + ?self.fromNodes(compressor, Poseidon2Zero, nodes, nleaves) let index = Rng.instance.rand(nleaves - 1) diff --git a/codex/multicodec_exts.nim b/codex/multicodec_exts.nim new file mode 100644 index 00000000..b14fbbe6 --- /dev/null +++ b/codex/multicodec_exts.nim @@ -0,0 +1,11 @@ +const CodecExts = [ + ("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge + ("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize + ("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress + ("codex-manifest", 0xCD01), + ("codex-block", 0xCD02), + ("codex-root", 0xCD03), + ("codex-slot-root", 0xCD04), + ("codex-proving-root", 0xCD05), + ("codex-slot-cell", 0xCD06), +] diff --git a/codex/multihash_exts.nim b/codex/multihash_exts.nim new file mode 100644 index 00000000..05a5305d --- /dev/null +++ b/codex/multihash_exts.nim @@ -0,0 +1,40 @@ +import blscurve/bls_public_exports +import pkg/constantine/hashes +import poseidon2 + +proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) = + # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing + # See: https://github.com/logos-storage/logos-storage-nim/issues/1162 + if len(output) > 0: + let digest = hashes.sha256.hash(data) + copyMem(addr output[0], addr digest[0], 32) + +proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) = + if len(output) > 0: + var digest = poseidon2.Sponge.digest(data).toBytes() + copyMem(addr output[0], addr digest[0], uint(len(output))) + +proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) = + if len(output) > 0: + var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes() + copyMem(addr output[0], addr digest[0], uint(len(output))) + +const Sha2256MultiHash* = MHash( + mcodec: multiCodec("sha2-256"), + size: sha256.sizeDigest, + coder: sha2_256hash_constantine, +) +const HashExts = [ + # override sha2-256 hash function + Sha2256MultiHash, + MHash( + mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"), + size: 32, + coder: poseidon2_sponge_rate2, + ), + MHash( + mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"), + size: 32, + coder: poseidon2_merkle_2kb_sponge, + ), +] diff --git a/codex/namespaces.nim b/codex/namespaces.nim index c159ab1a..169be184 100644 --- a/codex/namespaces.nim +++ b/codex/namespaces.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/nat.nim b/codex/nat.nim index d022dad6..2038ca3d 100644 --- a/codex/nat.nim +++ b/codex/nat.nim @@ -10,10 +10,10 @@ import std/[options, os, strutils, times, net, atomics], - stew/shims/net as stewNet, - stew/[objects, results], + stew/[objects], 
nat_traversal/[miniupnpc, natpmp], - json_serialization/std/net + json_serialization/std/net, + results import pkg/chronos import pkg/chronicles diff --git a/codex/node.nim b/codex/node.nim index 1ca471d5..3d249c4b 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -44,7 +44,7 @@ import ./indexingstrategy import ./utils import ./errors import ./logutils -import ./utils/asynciter +import ./utils/safeasynciter import ./utils/trackedfutures export logutils @@ -52,7 +52,10 @@ export logutils logScope: topics = "codex node" -const DefaultFetchBatch = 10 +const + DefaultFetchBatch = 1024 + MaxOnBatchBlocks = 128 + BatchRefillThreshold = 0.75 # Refill when 75% of window completes type Contracts* = @@ -72,15 +75,15 @@ type contracts*: Contracts clock*: Clock storage*: Contracts - taskpool: Taskpool + taskPool: Taskpool trackedFutures: TrackedFutures CodexNodeRef* = ref CodexNode OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].} - BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {. - gcsafe, async: (raises: [CancelledError]) - .} + BatchProc* = + proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).} + OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].} func switch*(self: CodexNodeRef): Switch = return self.switch @@ -186,34 +189,62 @@ proc fetchBatched*( # (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i)) # ) - while not iter.finished: - let blockFutures = collect: - for i in 0 ..< batchSize: - if not iter.finished: - let address = BlockAddress.init(cid, iter.next()) - if not (await address in self.networkStore) or fetchLocal: - self.networkStore.getBlock(address) + # Sliding window: maintain batchSize blocks in-flight + let + refillThreshold = int(float(batchSize) * BatchRefillThreshold) + refillSize = max(refillThreshold, 1) + maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks) - if blockFutures.len == 0: + var + blockData: seq[bt.Block] + failedBlocks = 0 + successfulBlocks = 0 + completedInWindow = 0 + + var addresses = newSeqOfCap[BlockAddress](batchSize) + for i in 0 ..< batchSize: + if not iter.finished: + let address = BlockAddress.init(cid, iter.next()) + if fetchLocal or not (await address in self.networkStore): + addresses.add(address) + + var blockResults = await self.networkStore.getBlocks(addresses) + + while not blockResults.finished: + without blk =? await blockResults.next(), err: + inc(failedBlocks) continue - without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err: - trace "Some blocks failed to fetch", err = err.msg - return failure(err) + inc(successfulBlocks) + inc(completedInWindow) - let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value) + if not onBatch.isNil: + blockData.add(blk) + if blockData.len >= maxCallbackBlocks: + if batchErr =? 
(await onBatch(blockData)).errorOption: + return failure(batchErr) + blockData = @[] - let numOfFailedBlocks = blockResults.len - blocks.len - if numOfFailedBlocks > 0: - return - failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")") + if completedInWindow >= refillThreshold and not iter.finished: + var refillAddresses = newSeqOfCap[BlockAddress](refillSize) + for i in 0 ..< refillSize: + if not iter.finished: + let address = BlockAddress.init(cid, iter.next()) + if fetchLocal or not (await address in self.networkStore): + refillAddresses.add(address) - if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption: + if refillAddresses.len > 0: + blockResults = + chain(blockResults, await self.networkStore.getBlocks(refillAddresses)) + completedInWindow = 0 + + if failedBlocks > 0: + return failure("Some blocks failed to fetch (" & $failedBlocks & ")") + + if not onBatch.isNil and blockData.len > 0: + if batchErr =? (await onBatch(blockData)).errorOption: return failure(batchErr) - if not iter.finished: - await sleepAsync(1.millis) - success() proc fetchBatched*( @@ -294,7 +325,7 @@ proc streamEntireDataset( try: # Spawn an erasure decoding job let erasure = Erasure.new( - self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskPool ) without _ =? (await erasure.decode(manifest)), error: error "Unable to erasure decode manifest", manifestCid, exc = error.msg @@ -403,6 +434,7 @@ proc store*( filename: ?string = string.none, mimetype: ?string = string.none, blockSize = DefaultBlockSize, + onBlockStored: OnBlockStoredProc = nil, ): Future[?!Cid] {.async.} = ## Save stream contents as dataset with given blockSize ## to nodes's BlockStore, and return Cid of its manifest @@ -432,6 +464,9 @@ proc store*( if err =? (await self.networkStore.putBlock(blk)).errorOption: error "Unable to store block", cid = blk.cid, err = err.msg return failure(&"Unable to store block {blk.cid}") + + if not onBlockStored.isNil: + onBlockStored(chunk) except CancelledError as exc: raise exc except CatchableError as exc: @@ -439,7 +474,7 @@ proc store*( finally: await stream.close() - without tree =? CodexTree.init(cids), err: + without tree =? (await CodexTree.init(self.taskPool, cids)), err: return failure(err) without treeCid =? tree.rootCid(CIDv1, dataCodec), err: @@ -533,14 +568,15 @@ proc setupRequest( # Erasure code the dataset according to provided parameters let erasure = Erasure.new( - self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskPool ) without encoded =? (await erasure.encode(manifest, ecK, ecM)), error: trace "Unable to erasure code dataset" return failure(error) - without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err: + without builder =? + Poseidon2Builder.new(self.networkStore.localStore, encoded, self.taskPool), err: trace "Unable to create slot builder" return failure(err) @@ -644,7 +680,7 @@ proc onStore( return failure(err) without builder =? 
- Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err: + Poseidon2Builder.new( + self.networkStore, manifest, self.taskPool, manifest.verifiableStrategy + ), err: trace "Unable to create slots builder", err = err.msg return failure(err) @@ -679,7 +717,7 @@ proc onStore( trace "start repairing slot", slotIdx try: let erasure = Erasure.new( - self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskPool ) if err =? (await erasure.repair(manifest)).errorOption: error "Unable to erasure decode repairing manifest", @@ -846,7 +884,7 @@ proc start*(self: CodexNodeRef) {.async.} = self.contracts.validator = ValidatorInteractions.none self.networkId = self.switch.peerInfo.peerId - notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs + notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs proc stop*(self: CodexNodeRef) {.async.} = trace "Stopping node" @@ -871,6 +909,7 @@ proc stop*(self: CodexNodeRef) {.async.} = if not self.clock.isNil: await self.clock.stop() +proc close*(self: CodexNodeRef) {.async.} = if not self.networkStore.isNil: await self.networkStore.close @@ -880,7 +919,7 @@ proc new*( networkStore: NetworkStore, engine: BlockExcEngine, discovery: Discovery, - taskpool: Taskpool, + taskPool: Taskpool, prover = Prover.none, contracts = Contracts.default, ): CodexNodeRef = @@ -893,7 +932,7 @@ proc new*( engine: engine, prover: prover, discovery: discovery, - taskPool: taskpool, + taskPool: taskPool, contracts: contracts, trackedFutures: TrackedFutures(), ) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index adab821b..5650f230 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,10 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import std/sequtils import std/mimetypes @@ -183,7 +180,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string = proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) = let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion - router.api(MethodOptions, "/api/codex/v1/data") do( + router.api(MethodOptions, "/api/storage/v1/data") do( resp: HttpResponseRef ) -> RestApiResponse: if corsOrigin =? 
allowedOrigin: @@ -195,7 +192,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.status = Http204 await resp.sendBody("") - router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse: + router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse: ## Upload a file in a streaming manner ## @@ -257,11 +254,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute finally: await reader.closeWait() - router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse: let json = await formatManifestBlocks(node) return RestApiResponse.response($json, contentType = "application/json") - router.api(MethodOptions, "/api/codex/v1/data/{cid}") do( + router.api(MethodOptions, "/api/storage/v1/data/{cid}") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: if corsOrigin =? allowedOrigin: @@ -270,7 +267,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.status = Http204 await resp.sendBody("") - router.api(MethodGet, "/api/codex/v1/data/{cid}") do( + router.api(MethodGet, "/api/storage/v1/data/{cid}") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: var headers = buildCorsHeaders("GET", allowedOrigin) @@ -286,7 +283,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute await node.retrieveCid(cid.get(), local = true, resp = resp) - router.api(MethodDelete, "/api/codex/v1/data/{cid}") do( + router.api(MethodDelete, "/api/storage/v1/data/{cid}") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: ## Deletes either a single block or an entire dataset @@ -307,7 +304,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.status = Http204 await resp.sendBody("") - router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( + router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: ## Download a file from the network to the local node @@ -328,7 +325,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute let json = %formatManifest(cid.get(), manifest) return RestApiResponse.response($json, contentType = "application/json") - router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do( + router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: ## Download a file from the network in a streaming @@ -347,7 +344,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition") await node.retrieveCid(cid.get(), local = false, resp = resp) - router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( + router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: ## Download only the manifest. 
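Every route in this file moves from the `/api/codex/v1` prefix to `/api/storage/v1`; the handlers themselves are unchanged. A quick client-side sanity check of the renamed prefix, sketched with `std/httpclient` (host and port are assumptions; point it at your node's configured API address):

```nim
import std/httpclient

let client = newHttpClient()
try:
  # Was /api/codex/v1/spr before this change.
  echo client.getContent("http://localhost:8080/api/storage/v1/spr")
  # Was /api/codex/v1/space before this change.
  echo client.getContent("http://localhost:8080/api/storage/v1/space")
finally:
  client.close()
```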
@@ -365,7 +362,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute let json = %formatManifest(cid.get(), manifest) return RestApiResponse.response($json, contentType = "application/json") - router.api(MethodGet, "/api/codex/v1/data/{cid}/exists") do( + router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: ## Only test if the give CID is available in the local store @@ -381,7 +378,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute let json = %*{$cid: hasCid} return RestApiResponse.response($json, contentType = "application/json") - router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse: let json = %RestRepoStore( totalBlocks: repoStore.totalBlocks, @@ -394,7 +391,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = let allowedOrigin = router.allowedOrigin - router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse: var headers = buildCorsHeaders("GET", allowedOrigin) ## Returns active slots for the host @@ -412,7 +409,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do( + router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do( slotId: SlotId ) -> RestApiResponse: ## Returns active slot with id {slotId} for the host. Returns 404 if the @@ -442,7 +439,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = restAgent.toJson, contentType = "application/json", headers = headers ) - router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse: ## Returns storage that is for sale var headers = buildCorsHeaders("GET", allowedOrigin) @@ -464,7 +461,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse: + router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse: ## Add available storage to sell. ## Every time Availability's offer finishes, its capacity is ## returned to the availability. @@ -544,7 +541,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do( + router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do( id: AvailabilityId, resp: HttpResponseRef ) -> RestApiResponse: if corsOrigin =? allowedOrigin: @@ -553,7 +550,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = resp.status = Http204 await resp.sendBody("") - router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do( + router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do( id: AvailabilityId ) -> RestApiResponse: ## Updates Availability. 
@@ -641,7 +638,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500) - router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do( + router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do( id: AvailabilityId ) -> RestApiResponse: ## Gets Availability's reservations. @@ -685,7 +682,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = let allowedOrigin = router.allowedOrigin - router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do( + router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do( cid: Cid ) -> RestApiResponse: var headers = buildCorsHeaders("POST", allowedOrigin) @@ -795,7 +792,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do( + router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do( id: PurchaseId ) -> RestApiResponse: var headers = buildCorsHeaders("GET", allowedOrigin) @@ -827,7 +824,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse: var headers = buildCorsHeaders("GET", allowedOrigin) try: @@ -849,7 +846,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = ## various node management api's ## - router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse: ## Returns node SPR in requested format, json or text. ## var headers = buildCorsHeaders("GET", allowedOrigin) @@ -872,7 +869,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse: ## Returns node's peerId in requested format, json or text. 
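Note that the purchasing routes keep their own `storage` resource segment, so after the prefix rename they deliberately read `/api/storage/v1/storage/...`. A sketch of querying the sales and purchasing endpoints renamed above (the availability id is a placeholder):

```nim
import std/httpclient

let client = newHttpClient()
let base = "http://localhost:8080/api/storage/v1"

# Storage this host currently offers for sale.
echo client.getContent(base & "/sales/availability")

# Reservations attached to a single availability (placeholder id).
echo client.getContent(base & "/sales/availability/placeholder-id/reservations")

# All purchases known to the node; note the doubled "storage" segment.
echo client.getContent(base & "/storage/purchases")
```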
## var headers = buildCorsHeaders("GET", allowedOrigin) @@ -891,7 +888,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do( + router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do( peerId: PeerId, addrs: seq[MultiAddress] ) -> RestApiResponse: ## Connect to a peer @@ -929,7 +926,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = let allowedOrigin = router.allowedOrigin - router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse: + router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse: ## Print rudimentary node information ## var headers = buildCorsHeaders("GET", allowedOrigin) @@ -949,7 +946,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = "", "announceAddresses": node.discovery.announceAddrs, "table": table, - "codex": { + "storage": { "version": $codexVersion, "revision": $codexRevision, "contracts": $codexContractsRevision, @@ -964,7 +961,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do( + router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do( level: Option[string] ) -> RestApiResponse: ## Set log level at run time @@ -990,8 +987,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500, headers = headers) - when codex_enable_api_debug_peers: - router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do( + when storage_enable_api_debug_peers: + router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do( peerId: PeerId ) -> RestApiResponse: var headers = buildCorsHeaders("GET", allowedOrigin) diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim index 319ce3d6..3f9388d6 100644 --- a/codex/rest/coders.nim +++ b/codex/rest/coders.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/rng.nim b/codex/rng.nim index 866d65f8..d36da9d7 100644 --- a/codex/rng.nim +++ b/codex/rng.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,10 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/libp2p/crypto/crypto import pkg/bearssl/rand diff --git a/codex/sales.nim b/codex/sales.nim index 353b70cc..bc4262f1 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -22,7 +22,7 @@ import ./utils/exceptions ## Sales holds a list of available storage that it may sell. ## ## When storage is requested on the market that matches availability, the Sales -## object will instruct the Codex node to persist the requested data. 
Once the +## object will instruct the Logos Storage node to persist the requested data. Once the ## data has been persisted, it uploads a proof of storage to the market in an ## attempt to win a storage contract. ## diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index f27a66fe..2e62fdf8 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -27,9 +27,7 @@ ## | UInt256 | totalRemainingCollateral | | ## +---------------------------------------------------+ -import pkg/upraises -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import std/sequtils import std/sugar @@ -38,7 +36,6 @@ import std/sequtils import std/times import pkg/chronos import pkg/datastore -import pkg/nimcrypto import pkg/questionable import pkg/questionable/results import pkg/stint @@ -55,6 +52,8 @@ import ../units export requests export logutils +from nimcrypto import randomBytes + logScope: topics = "marketplace sales reservations" @@ -92,14 +91,10 @@ type repo: RepoStore OnAvailabilitySaved: ?OnAvailabilitySaved - GetNext* = proc(): Future[?seq[byte]] {. - upraises: [], gcsafe, async: (raises: [CancelledError]), closure - .} - IterDispose* = - proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.} - OnAvailabilitySaved* = proc(availability: Availability): Future[void] {. - upraises: [], gcsafe, async: (raises: []) - .} + GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.} + IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.} + OnAvailabilitySaved* = + proc(availability: Availability): Future[void] {.async: (raises: []).} StorableIter* = ref object finished*: bool next*: GetNext diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index 96137fe0..6584353c 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -2,7 +2,6 @@ import pkg/chronos import pkg/questionable import pkg/questionable/results import pkg/stint -import pkg/upraises import ../contracts/requests import ../errors import ../logutils @@ -113,14 +112,12 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = method onFulfilled*( agent: SalesAgent, requestId: RequestId -) {.base, gcsafe, upraises: [].} = +) {.base, gcsafe, raises: [].} = let cancelled = agent.data.cancelled if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished: cancelled.cancelSoon() -method onFailed*( - agent: SalesAgent, requestId: RequestId -) {.base, gcsafe, upraises: [].} = +method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} = without request =? 
agent.data.request: return if agent.data.requestId == requestId: @@ -128,7 +125,7 @@ method onFailed*( method onSlotFilled*( agent: SalesAgent, requestId: RequestId, slotIndex: uint64 -) {.base, gcsafe, upraises: [].} = +) {.base, gcsafe, raises: [].} = if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: agent.schedule(slotFilledEvent(requestId, slotIndex)) diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index ac0908df..5fd7099c 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -1,6 +1,5 @@ import pkg/questionable import pkg/questionable/results -import pkg/upraises import pkg/libp2p/cid import ../market @@ -24,21 +23,20 @@ type slotQueue*: SlotQueue simulateProofFailures*: int - BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {. - gcsafe, async: (raises: [CancelledError]) - .} + BlocksCb* = + proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).} OnStore* = proc( request: StorageRequest, expiry: SecondsSince1970, slot: uint64, blocksCb: BlocksCb, isRepairing: bool, - ): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).} + ): Future[?!void] {.async: (raises: [CancelledError]).} OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. - gcsafe, async: (raises: [CancelledError]) + async: (raises: [CancelledError]) .} OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {. - gcsafe, async: (raises: [CancelledError]) + async: (raises: [CancelledError]) .} OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].} OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].} diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index b6e77395..ad9a07db 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -15,8 +15,7 @@ logScope: topics = "marketplace slotqueue" type - OnProcessSlot* = - proc(item: SlotQueueItem): Future[void] {.gcsafe, async: (raises: []).} + OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).} # Non-ref obj copies value when assigned, preventing accidental modification # of values which could cause an incorrect order (eg diff --git a/codex/sales/statemachine.nim b/codex/sales/statemachine.nim index d1732549..dc199ade 100644 --- a/codex/sales/statemachine.nim +++ b/codex/sales/statemachine.nim @@ -1,5 +1,4 @@ import pkg/questionable -import pkg/upraises import ../errors import ../utils/asyncstatemachine import ../market @@ -16,17 +15,17 @@ type method onCancelled*( state: SaleState, request: StorageRequest -): ?State {.base, upraises: [].} = +): ?State {.base, raises: [].} = discard method onFailed*( state: SaleState, request: StorageRequest -): ?State {.base, upraises: [].} = +): ?State {.base, raises: [].} = discard method onSlotFilled*( state: SaleState, requestId: RequestId, slotIndex: uint64 -): ?State {.base, upraises: [].} = +): ?State {.base, raises: [].} = discard proc cancelledEvent*(request: StorageRequest): Event = diff --git a/codex/sales/states/errored.nim b/codex/sales/states/errored.nim index 95848fd3..3887c652 100644 --- a/codex/sales/states/errored.nim +++ b/codex/sales/states/errored.nim @@ -1,6 +1,5 @@ import pkg/questionable import pkg/questionable/results -import pkg/upraises import ../statemachine import ../salesagent diff --git a/codex/sales/states/filled.nim b/codex/sales/states/filled.nim index b0fc65c9..ec54762a 100644 --- a/codex/sales/states/filled.nim +++ 
b/codex/sales/states/filled.nim @@ -11,7 +11,7 @@ import ./cancelled import ./failed import ./proving -when codex_enable_proof_failures: +when storage_enable_proof_failures: import ./provingsimulated logScope: @@ -59,7 +59,7 @@ method run*( if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: return some State(SaleErrored(error: err)) - when codex_enable_proof_failures: + when storage_enable_proof_failures: if context.simulateProofFailures > 0: info "Proving with failure rate", rate = context.simulateProofFailures return some State( diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index dba249de..ff6eb88c 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -61,7 +61,7 @@ method run*( return some State(SaleIgnored(reprocessSlot: false)) # TODO: Once implemented, check to ensure the host is allowed to fill the slot, - # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) + # due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal) logScope: slotIndex = data.slotIndex diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index edf7eb1e..8ba468e7 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -1,5 +1,5 @@ import ../../conf -when codex_enable_proof_failures: +when storage_enable_proof_failures: import std/strutils import pkg/stint import pkg/ethers diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 5fbb0fe1..b1ca502f 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -18,18 +18,20 @@ import pkg/chronos import pkg/questionable import pkg/questionable/results import pkg/constantine/math/io/io_fields +import pkg/taskpools import ../../logutils import ../../utils import ../../stores import ../../manifest import ../../merkletree +import ../../utils/poseidon2digest import ../../utils/asynciter import ../../indexingstrategy import ../converters -export converters, asynciter +export converters, asynciter, poseidon2digest logScope: topics = "codex slotsbuilder" @@ -45,6 +47,7 @@ type SlotsBuilder*[T, H] = ref object of RootObj emptyBlock: seq[byte] # empty block verifiableTree: ?T # verification tree (dataset tree) emptyDigestTree: T # empty digest tree for empty blocks + taskPool: Taskpool func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} = ## Returns true if the slots are verifiable. @@ -165,6 +168,35 @@ proc buildBlockTree*[T, H]( success (blk.data, tree) +proc getBlockDigest*[T, H]( + self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural +): Future[?!H] {.async: (raises: [CancelledError]).} = + logScope: + blkIdx = blkIdx + slotPos = slotPos + numSlotBlocks = self.manifest.numSlotBlocks + cellSize = self.cellSize + + trace "Building block tree" + + if slotPos > (self.manifest.numSlotBlocks - 1): + # pad blocks are 0 byte blocks + trace "Returning empty digest tree for pad block" + return self.emptyDigestTree.root + + without blk =? 
await self.store.getBlock(self.manifest.treeCid, blkIdx), err: + error "Failed to get block CID for tree at index", err = err.msg + return failure(err) + + if blk.isEmpty: + return self.emptyDigestTree.root + + without dg =? (await T.digest(self.taskPool, blk.data, self.cellSize.int)), err: + error "Failed to create digest for block", err = err.msg + return failure(err) + + return success dg + proc getCellHashes*[T, H]( self: SlotsBuilder[T, H], slotIndex: Natural ): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} = @@ -190,8 +222,7 @@ proc getCellHashes*[T, H]( pos = i trace "Getting block CID for tree at index" - without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root, - err: + without digest =? (await self.getBlockDigest(blkIdx, i)), err: error "Failed to get block CID for tree at index", err = err.msg return failure(err) @@ -310,6 +341,7 @@ proc new*[T, H]( _: type SlotsBuilder[T, H], store: BlockStore, manifest: Manifest, + taskPool: Taskpool, strategy = LinearStrategy, cellSize = DefaultCellSize, ): ?!SlotsBuilder[T, H] = @@ -383,6 +415,7 @@ proc new*[T, H]( emptyBlock: emptyBlock, numSlotBlocks: numSlotBlocksTotal, emptyDigestTree: emptyDigestTree, + taskPool: taskPool, ) if manifest.verifiable: diff --git a/codex/slots/converters.nim b/codex/slots/converters.nim index f0dc3990..f22a7d64 100644 --- a/codex/slots/converters.nim +++ b/codex/slots/converters.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim index 1d2e3e19..479866f4 100644 --- a/codex/slots/proofs/backends/circomcompat.nim +++ b/codex/slots/proofs/backends/circomcompat.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/slots/proofs/backends/converters.nim b/codex/slots/proofs/backends/converters.nim index ee771477..19ea3b35 100644 --- a/codex/slots/proofs/backends/converters.nim +++ b/codex/slots/proofs/backends/converters.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 1afcd068..eeb8cbe3 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -13,6 +13,7 @@ import pkg/chronicles import pkg/circomcompat import pkg/poseidon2 import pkg/questionable/results +import pkg/taskpools import pkg/libp2p/cid @@ -47,6 +48,7 @@ type backend: AnyBackend store: BlockStore nSamples: int + taskPool: Taskpool proc prove*( self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge @@ -61,7 +63,7 @@ proc prove*( trace "Received proof challenge" - without builder =? AnyBuilder.new(self.store, manifest), err: + without builder =? 
AnyBuilder.new(self.store, manifest, self.taskPool), err: error "Unable to create slots builder", err = err.msg return failure(err) @@ -88,6 +90,6 @@ proc verify*( self.backend.verify(proof, inputs) proc new*( - _: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int + _: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int, tp: Taskpool ): Prover = - Prover(store: store, backend: backend, nSamples: nSamples) + Prover(store: store, backend: backend, nSamples: nSamples, taskPool: tp) diff --git a/codex/slots/sampler/sampler.nim b/codex/slots/sampler/sampler.nim index d7a36cfd..1afae3fb 100644 --- a/codex/slots/sampler/sampler.nim +++ b/codex/slots/sampler/sampler.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/slots/sampler/utils.nim b/codex/slots/sampler/utils.nim index ce78fadc..0e7c5b93 100644 --- a/codex/slots/sampler/utils.nim +++ b/codex/slots/sampler/utils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/slots/types.nim b/codex/slots/types.nim index 0cd24326..5976c9b0 100644 --- a/codex/slots/types.nim +++ b/codex/slots/types.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/stores/blockstore.nim b/codex/stores/blockstore.nim index e436577c..732d29e4 100644 --- a/codex/stores/blockstore.nim +++ b/codex/stores/blockstore.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -29,7 +29,7 @@ type Block Both - CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, async: (raises: []).} + CidCallback* = proc(cid: Cid): Future[void] {.async: (raises: []).} BlockStore* = ref object of RootObj onBlockStored*: ?CidCallback @@ -70,6 +70,14 @@ method completeBlock*( ) {.base, gcsafe.} = discard +method getBlocks*( + self: BlockStore, addresses: seq[BlockAddress] +): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} = + ## Gets a set of blocks from the blockstore. Blocks might + ## be returned in any order. 
+ + raiseAssert("getBlocks not implemented!") + method getBlockAndProof*( self: BlockStore, treeCid: Cid, index: Natural ): Future[?!(Block, CodexProof)] {.base, async: (raises: [CancelledError]), gcsafe.} = diff --git a/codex/stores/cachestore.nim b/codex/stores/cachestore.nim index ff3fd6df..cb5da7a3 100644 --- a/codex/stores/cachestore.nim +++ b/codex/stores/cachestore.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -66,6 +66,21 @@ method getBlock*( trace "Error requesting block from cache", cid, error = exc.msg return failure exc +method getBlocks*( + self: CacheStore, addresses: seq[BlockAddress] +): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} = + var i = 0 + + proc isFinished(): bool = + i == addresses.len + + proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} = + let value = await self.getBlock(addresses[i]) + inc(i) + return value + + return SafeAsyncIter[Block].new(genNext, isFinished) + method getCidAndProof*( self: CacheStore, treeCid: Cid, index: Natural ): Future[?!(Cid, CodexProof)] {.async: (raises: [CancelledError]).} = diff --git a/codex/stores/keyutils.nim b/codex/stores/keyutils.nim index 0634b6a2..78efa732 100644 --- a/codex/stores/keyutils.nim +++ b/codex/stores/keyutils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import std/sugar import pkg/questionable/results diff --git a/codex/stores/maintenance.nim b/codex/stores/maintenance.nim index 1d109031..38486239 100644 --- a/codex/stores/maintenance.nim +++ b/codex/stores/maintenance.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/stores/networkstore.nim b/codex/stores/networkstore.nim index 06b96b77..200024f9 100644 --- a/codex/stores/networkstore.nim +++ b/codex/stores/networkstore.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -31,6 +31,31 @@ type NetworkStore* = ref object of BlockStore engine*: BlockExcEngine # blockexc decision engine localStore*: BlockStore # local block store +method getBlocks*( + self: NetworkStore, addresses: seq[BlockAddress] +): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} = + var + localAddresses: seq[BlockAddress] + remoteAddresses: seq[BlockAddress] + + let runtimeQuota = 10.milliseconds + var lastIdle = Moment.now() + + for address in addresses: + if not (await address in self.localStore): + remoteAddresses.add(address) + else: + localAddresses.add(address) + + if (Moment.now() - lastIdle) >= runtimeQuota: + await idleAsync() + lastIdle = Moment.now() + + return chain( + await self.localStore.getBlocks(localAddresses), + self.engine.requestBlocks(remoteAddresses), + ) + method getBlock*( self: NetworkStore, address: BlockAddress ): Future[?!Block] {.async: (raises: [CancelledError]).} = diff --git 
a/codex/stores/repostore/coders.nim b/codex/stores/repostore/coders.nim index 47df7219..a840308c 100644 --- a/codex/stores/repostore/coders.nim +++ b/codex/stores/repostore/coders.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index ddbfdfb0..e27322ea 100644 --- a/codex/stores/repostore/operations.nim +++ b/codex/stores/repostore/operations.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index bea2971c..aab1a080 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -38,6 +38,21 @@ logScope: # BlockStore API ########################################################### +method getBlocks*( + self: RepoStore, addresses: seq[BlockAddress] +): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} = + var i = 0 + + proc isFinished(): bool = + i == addresses.len + + proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} = + let value = await self.getBlock(addresses[i]) + inc(i) + return value + + return SafeAsyncIter[Block].new(genNext, isFinished) + method getBlock*( self: RepoStore, cid: Cid ): Future[?!Block] {.async: (raises: [CancelledError]).} = @@ -428,7 +443,6 @@ proc start*( ): Future[void] {.async: (raises: [CancelledError, CodexError]).} = ## Start repo ## - if self.started: trace "Repo already started" return @@ -450,6 +464,5 @@ proc stop*(self: RepoStore): Future[void] {.async: (raises: []).} = return trace "Stopping repo" - await self.close() self.started = false diff --git a/codex/stores/repostore/types.nim b/codex/stores/repostore/types.nim index 42f528e9..bd43dded 100644 --- a/codex/stores/repostore/types.nim +++ b/codex/stores/repostore/types.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2024 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/stores/treehelper.nim b/codex/stores/treehelper.nim index e1f5d48d..9b0eb5d9 100644 --- a/codex/stores/treehelper.nim +++ b/codex/stores/treehelper.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,10 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import std/sugar import pkg/chronos diff --git a/codex/streams/asyncstreamwrapper.nim b/codex/streams/asyncstreamwrapper.nim index 6708816d..f80c0696 100644 --- a/codex/streams/asyncstreamwrapper.nim +++ b/codex/streams/asyncstreamwrapper.nim @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
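With these hunks every store now exposes a `getBlocks` batch accessor returning `SafeAsyncIter[Block]`: the `BlockStore` base method is a `raiseAssert` stub, `CacheStore` and `RepoStore` walk the addresses sequentially, and `NetworkStore` partitions addresses into local and remote sets and chains the two iterators. A minimal sketch of draining the iterator, using only the `finished`/`next` surface visible above (logging illustrative):

```nim
import pkg/chronos
import pkg/questionable/results

# Assumes the store module imports above (BlockStore, BlockAddress,
# Block, SafeAsyncIter).
proc fetchAll(
    store: BlockStore, addresses: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
  let iter = await store.getBlocks(addresses)
  while not iter.finished:
    without blk =? (await iter.next()), err:
      echo "block fetch failed: ", err.msg
      continue
    echo "fetched block ", $blk.cid
```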
-import pkg/upraises -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/chronos import pkg/libp2p diff --git a/codex/streams/seekablestream.nim b/codex/streams/seekablestream.nim index c48ec28f..926fa70d 100644 --- a/codex/streams/seekablestream.nim +++ b/codex/streams/seekablestream.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index 2e06d39d..af747dd6 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,10 +9,7 @@ import std/options -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/chronos import pkg/stew/ptrops diff --git a/codex/systemclock.nim b/codex/systemclock.nim index 6226f627..93c70d89 100644 --- a/codex/systemclock.nim +++ b/codex/systemclock.nim @@ -1,9 +1,8 @@ import std/times -import pkg/upraises import ./clock type SystemClock* = ref object of Clock -method now*(clock: SystemClock): SecondsSince1970 {.upraises: [].} = +method now*(clock: SystemClock): SecondsSince1970 {.raises: [].} = let now = times.now().utc now.toTime().toUnix() diff --git a/codex/units.nim b/codex/units.nim index b600103f..e95f9e81 100644 --- a/codex/units.nim +++ b/codex/units.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/utils.nim b/codex/utils.nim index 9cea427e..618db055 100644 --- a/codex/utils.nim +++ b/codex/utils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/utils/addrutils.nim b/codex/utils/addrutils.nim index a9ec54f5..1b7a556a 100644 --- a/codex/utils/addrutils.nim +++ b/codex/utils/addrutils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,15 +7,13 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
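The recurring mechanical change in these files, here and in the hunks that follow, is dropping `pkg/upraises` in favour of Nim's built-in pragma push. A small sketch of the pattern, including how a single proc can still widen the pushed empty raises list:

```nim
import std/strutils

# Module-level defaults, replacing `push: {.upraises: [].}`:
{.push raises: [], gcsafe.}

proc parsePort(s: string): int {.raises: [ValueError].} =
  # A proc-level annotation overrides the pushed empty raises list.
  parseInt(s)

{.pop.}
```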
-import pkg/upraises -push: - {.upraises: [].} +{.push raises: [], gcsafe.} +import std/net import std/strutils import std/options import pkg/libp2p -import pkg/stew/shims/net import pkg/stew/endians2 func remapAddr*( diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index bc37c462..3794e5cc 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/codex/utils/fileutils.nim b/codex/utils/fileutils.nim index 6f12dd76..52427c87 100644 --- a/codex/utils/fileutils.nim +++ b/codex/utils/fileutils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2021 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -9,10 +9,7 @@ ## Partially taken from nim beacon chain -import pkg/upraises - -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import std/strutils import pkg/stew/io2 diff --git a/codex/utils/keyutils.nim b/codex/utils/keyutils.nim index 664396d3..dcdc17bb 100644 --- a/codex/utils/keyutils.nim +++ b/codex/utils/keyutils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises -push: - {.upraises: [].} +{.push raises: [], gcsafe.} import pkg/questionable/results import pkg/libp2p/crypto/crypto diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 996d8dd0..45ad7589 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,7 +1,6 @@ {.push raises: [].} -import - std/[tables, hashes], pkg/results, pkg/stew/shims/net as stewNet, chronos, chronicles +import std/[net, tables, hashes], pkg/results, chronos, chronicles import pkg/libp2p diff --git a/codex/utils/poseidon2digest.nim b/codex/utils/poseidon2digest.nim index 6eaf21e9..321dbe12 100644 --- a/codex/utils/poseidon2digest.nim +++ b/codex/utils/poseidon2digest.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -7,13 +7,27 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
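The hunk that follows is the substantive part of this file's change: `Poseidon2Tree.digest` gains a Taskpool-backed overload that hashes on a worker thread and reports completion through a `ThreadSignalPtr`. A sketch of calling it, assuming a pool of at least two threads (the implementation asserts this so the signal can always fire); the chunk size and buffer are illustrative:

```nim
import pkg/chronos
import pkg/taskpools
import pkg/questionable/results

# Assumes this module's own imports, notably ../merkletree for
# Poseidon2Tree/Poseidon2Hash.
proc hashBlock(
    tp: Taskpool, data: seq[byte]
) {.async: (raises: [CancelledError]).} =
  without digest =? (await Poseidon2Tree.digest(tp, data, chunkSize = 2048)), err:
    echo "digest failed: ", err.msg
    return
  echo "digest computed for ", data.len, " bytes"

let tp = Taskpool.new(numThreads = 2)
waitFor hashBlock(tp, newSeq[byte](64 * 1024))
```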
+import std/[atomics] import pkg/poseidon2 import pkg/questionable/results import pkg/libp2p/multihash import pkg/stew/byteutils +import pkg/taskpools +import pkg/chronos +import pkg/chronos/threadsync +import ./uniqueptr import ../merkletree +type DigestTask* = object + signal: ThreadSignalPtr + bytes: seq[byte] + chunkSize: int + success: Atomic[bool] + digest: UniquePtr[Poseidon2Hash] + +export DigestTask + func spongeDigest*( _: type Poseidon2Hash, bytes: openArray[byte], rate: static int = 2 ): ?!Poseidon2Hash = @@ -30,7 +44,7 @@ func spongeDigest*( success Sponge.digest(bytes, rate) -func digestTree*( +proc digestTree*( _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int ): ?!Poseidon2Tree = ## Hashes chunks of data with a sponge of rate 2, and combines the @@ -44,6 +58,7 @@ func digestTree*( var index = 0 var leaves: seq[Poseidon2Hash] + while index < bytes.len: let start = index let finish = min(index + chunkSize, bytes.len) @@ -61,6 +76,46 @@ func digest*( (?Poseidon2Tree.digestTree(bytes, chunkSize)).root +proc digestWorker(tp: Taskpool, task: ptr DigestTask) {.gcsafe.} = + defer: + discard task[].signal.fireSync() + + var res = Poseidon2Tree.digest(task[].bytes, task[].chunkSize) + + if res.isErr: + task[].success.store(false) + return + + task[].digest = newUniquePtr(res.get()) + task[].success.store(true) + +proc digest*( + _: type Poseidon2Tree, tp: Taskpool, bytes: seq[byte], chunkSize: int +): Future[?!Poseidon2Hash] {.async: (raises: [CancelledError]).} = + without signal =? ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + defer: + signal.close().expect("closing once works") + + doAssert tp.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + + var task = DigestTask(signal: signal, bytes: bytes, chunkSize: chunkSize) + + tp.spawn digestWorker(tp, addr task) + + let signalFut = signal.wait() + + if err =? 
catch(await signalFut.join()).errorOption: + ?catch(await noCancel signalFut) + if err of CancelledError: + raise (ref CancelledError) err + + if not task.success.load(): + return failure("digest task failed") + + success extractValue(task.digest) + func digestMhash*( _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int ): ?!MultiHash = diff --git a/codex/utils/safeasynciter.nim b/codex/utils/safeasynciter.nim index d582fec3..56b1d697 100644 --- a/codex/utils/safeasynciter.nim +++ b/codex/utils/safeasynciter.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2025 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -232,3 +232,28 @@ proc empty*[T](_: type SafeAsyncIter[T]): SafeAsyncIter[T] = true SafeAsyncIter[T].new(genNext, isFinished) + +proc chain*[T](iters: seq[SafeAsyncIter[T]]): SafeAsyncIter[T] = + if iters.len == 0: + return SafeAsyncIter[T].empty + + var curIdx = 0 + + proc ensureNext(): void = + while curIdx < iters.len and iters[curIdx].finished: + inc(curIdx) + + proc isFinished(): bool = + curIdx == iters.len + + proc genNext(): Future[?!T] {.async: (raises: [CancelledError]).} = + let item = await iters[curIdx].next() + ensureNext() + return item + + ensureNext() + + return SafeAsyncIter[T].new(genNext, isFinished) + +proc chain*[T](iters: varargs[SafeAsyncIter[T]]): SafeAsyncIter[T] = + chain(iters.toSeq) diff --git a/codex/utils/sharedbuf.nim b/codex/utils/sharedbuf.nim new file mode 100644 index 00000000..186d7126 --- /dev/null +++ b/codex/utils/sharedbuf.nim @@ -0,0 +1,24 @@ +import stew/ptrops + +type SharedBuf*[T] = object + payload*: ptr UncheckedArray[T] + len*: int + +proc view*[T](_: type SharedBuf, v: openArray[T]): SharedBuf[T] = + if v.len > 0: + SharedBuf[T](payload: makeUncheckedArray(addr v[0]), len: v.len) + else: + default(SharedBuf[T]) + +template checkIdx(v: SharedBuf, i: int) = + doAssert i > 0 and i <= v.len + +proc `[]`*[T](v: SharedBuf[T], i: int): var T = + v.checkIdx(i) + v.payload[i] + +template toOpenArray*[T](v: SharedBuf[T]): var openArray[T] = + v.payload.toOpenArray(0, v.len - 1) + +template toOpenArray*[T](v: SharedBuf[T], s, e: int): var openArray[T] = + v.toOpenArray().toOpenArray(s, e) diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index c4b6c4a6..fc22dfd3 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) @@ -17,7 +17,7 @@ import pkg/chronos import ../logutils type - TimerCallback* = proc(): Future[void] {.gcsafe, async: (raises: []).} + TimerCallback* = proc(): Future[void] {.async: (raises: []).} Timer* = ref object of RootObj callback: TimerCallback interval: Duration diff --git a/codex/utils/uniqueptr.nim b/codex/utils/uniqueptr.nim new file mode 100644 index 00000000..2aec0d38 --- /dev/null +++ b/codex/utils/uniqueptr.nim @@ -0,0 +1,58 @@ +import std/isolation +type UniquePtr*[T] = object + ## A unique pointer to a seq[seq[T]] in shared memory + ## Can only be moved, not copied + data: ptr T + +proc newUniquePtr*[T](data: sink Isolated[T]): UniquePtr[T] = + ## Creates a new unique sequence in shared memory + ## The memory is automatically freed when the object is destroyed + result.data = cast[ptr T](allocShared0(sizeof(T))) + result.data[] = extract(data) + +template newUniquePtr*[T](data: T): 
UniquePtr[T] = + newUniquePtr(isolate(data)) + +proc `=destroy`*[T](p: var UniquePtr[T]) = + ## Destructor for UniquePtr + if p.data != nil: + deallocShared(p.data) + p.data = nil + +proc `=copy`*[T]( + dest: var UniquePtr[T], src: UniquePtr[T] +) {.error: "UniquePtr cannot be copied, only moved".} + +proc `=sink`*[T](dest: var UniquePtr[T], src: UniquePtr[T]) = + if dest.data != nil: + `=destroy`(dest) + dest.data = src.data + # We need to nil out the source data to prevent double-free + # This is handled by Nim's destructive move semantics + +proc `[]`*[T](p: UniquePtr[T]): lent T = + ## Access the data (read-only) + if p.data == nil: + raise newException(NilAccessDefect, "accessing nil UniquePtr") + p.data[] + +# proc `[]`*[T](p: var UniquePtr[T]): var T = +# ## Access the data (mutable) +# if p.data == nil: +# raise newException(NilAccessDefect, "accessing nil UniquePtr") +# p.data[] + +proc isNil*[T](p: UniquePtr[T]): bool = + ## Check if the UniquePtr is nil + p.data == nil + +proc extractValue*[T](p: var UniquePtr[T]): T = + ## Extract the value from the UniquePtr and release the memory + if p.data == nil: + raise newException(NilAccessDefect, "extracting from nil UniquePtr") + # Move the value out + var isolated = isolate(p.data[]) + result = extract(isolated) + # Free the shared memory + deallocShared(p.data) + p.data = nil diff --git a/codex/validation.nim b/codex/validation.nim index 58a0e6b7..d9f8fb5e 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -80,7 +80,7 @@ proc removeSlotsThatHaveEnded(validation: Validation) {.async.} = proc markProofAsMissing( validation: Validation, slotId: SlotId, period: Period -) {.async.} = +) {.async: (raises: [CancelledError]).} = logScope: currentPeriod = validation.getCurrentPeriod() @@ -91,18 +91,18 @@ proc markProofAsMissing( else: let inDowntime {.used.} = await validation.market.inDowntime(slotId) trace "Proof not missing", checkedPeriod = period, inDowntime - except CancelledError: - raise + except CancelledError as e: + raise e except CatchableError as e: error "Marking proof as missing failed", msg = e.msg -proc markProofsAsMissing(validation: Validation) {.async.} = +proc markProofsAsMissing(validation: Validation) {.async: (raises: [CancelledError]).} = let slots = validation.slots for slotId in slots: let previousPeriod = validation.getCurrentPeriod() - 1 await validation.markProofAsMissing(slotId, previousPeriod) -proc run(validation: Validation) {.async: (raises: []).} = +proc run(validation: Validation) {.async: (raises: [CancelledError]).} = trace "Validation started" try: while true: diff --git a/config.nims b/config.nims index 8700f60e..e9e3eb0a 100644 --- a/config.nims +++ b/config.nims @@ -65,8 +65,8 @@ else: # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782 # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) switch("passC", "-march=x86-64") - else: switch("passC", "-march=native") - + else: + switch("passC", "-march=native") --tlsEmulation: off @@ -92,6 +92,12 @@ else: on --warningAsError: "ProveField:on" +--define: + "libp2p_multicodec_exts:../../../codex/multicodec_exts.nim" +--define: + "libp2p_multihash_exts:../../../codex/multihash_exts.nim" +--define: + "libp2p_contentids_exts:../../../codex/contentids_exts.nim" when (NimMajor, NimMinor) >= (1, 4): --warning: diff --git a/examples/golang/README.md b/examples/golang/README.md new file mode 100644 index 00000000..119648c2 --- /dev/null +++ b/examples/golang/README.md @@ -0,0 +1,24 @@ + +## Pre-requisite + +libstorage.so is 
needed to be compiled and present in build folder. + +## Compilation + +From the Logos Storage root folder: + +```code +go build -o storage-go examples/golang/storage.go +``` + +## Run +From the storage root folder: + + +```code +export LD_LIBRARY_PATH=build +``` + +```code +./storage-go +``` diff --git a/examples/golang/hello.txt b/examples/golang/hello.txt new file mode 100644 index 00000000..c57eff55 --- /dev/null +++ b/examples/golang/hello.txt @@ -0,0 +1 @@ +Hello World! \ No newline at end of file diff --git a/examples/golang/storage.go b/examples/golang/storage.go new file mode 100644 index 00000000..5908afc9 --- /dev/null +++ b/examples/golang/storage.go @@ -0,0 +1,885 @@ +package main + +/* + #cgo LDFLAGS: -L../../build/ -lstorage + #cgo LDFLAGS: -L../../ -Wl,-rpath,../../ + + #include + #include + #include "../../library/libstorage.h" + + typedef struct { + int ret; + char* msg; + size_t len; + uintptr_t h; + } Resp; + + static void* allocResp(uintptr_t h) { + Resp* r = (Resp*)calloc(1, sizeof(Resp)); + r->h = h; + return r; + } + + static void freeResp(void* resp) { + if (resp != NULL) { + free(resp); + } + } + + static int getRet(void* resp) { + if (resp == NULL) { + return 0; + } + Resp* m = (Resp*) resp; + return m->ret; + } + + void libstorageNimMain(void); + + static void storage_host_init_once(void){ + static int done; + if (!__atomic_exchange_n(&done, 1, __ATOMIC_SEQ_CST)) libstorageNimMain(); + } + + // resp must be set != NULL in case interest on retrieving data from the callback + void callback(int ret, char* msg, size_t len, void* resp); + + static void* cGoStorageNew(const char* configJson, void* resp) { + void* ret = storage_new(configJson, (StorageCallback) callback, resp); + return ret; + } + + static int cGoStorageStart(void* storageCtx, void* resp) { + return storage_start(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageStop(void* storageCtx, void* resp) { + return storage_stop(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageClose(void* storageCtx, void* resp) { + return storage_close(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageDestroy(void* storageCtx, void* resp) { + return storage_destroy(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageVersion(void* storageCtx, void* resp) { + return storage_version(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageRevision(void* storageCtx, void* resp) { + return storage_revision(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageRepo(void* storageCtx, void* resp) { + return storage_repo(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageSpr(void* storageCtx, void* resp) { + return storage_spr(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStoragePeerId(void* storageCtx, void* resp) { + return storage_peer_id(storageCtx, (StorageCallback) callback, resp); + } + + static int cGoStorageUploadInit(void* storageCtx, char* filepath, size_t chunkSize, void* resp) { + return storage_upload_init(storageCtx, filepath, chunkSize, (StorageCallback) callback, resp); + } + + static int cGoStorageUploadChunk(void* storageCtx, char* sessionId, const uint8_t* chunk, size_t len, void* resp) { + return storage_upload_chunk(storageCtx, sessionId, chunk, len, (StorageCallback) callback, resp); + } + + static int cGoStorageUploadFinalize(void* storageCtx, char* sessionId, void* resp) { + return storage_upload_finalize(storageCtx, 
sessionId, (StorageCallback) callback, resp); + } + + static int cGoStorageUploadCancel(void* storageCtx, char* sessionId, void* resp) { + return storage_upload_cancel(storageCtx, sessionId, (StorageCallback) callback, resp); + } + + static int cGoStorageUploadFile(void* storageCtx, char* sessionId, void* resp) { + return storage_upload_file(storageCtx, sessionId, (StorageCallback) callback, resp); + } + + static int cGoStorageLogLevel(void* storageCtx, char* logLevel, void* resp) { + return storage_log_level(storageCtx, logLevel, (StorageCallback) callback, resp); + } + + static int cGoStorageExists(void* storageCtx, char* cid, void* resp) { + return storage_exists(storageCtx, cid, (StorageCallback) callback, resp); + } +*/ +import "C" +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "os/signal" + "runtime/cgo" + "sync" + "syscall" + "unsafe" +) + +type LogFormat string + +const ( + LogFormatAuto LogFormat = "auto" + LogFormatColors LogFormat = "colors" + LogFormatNoColors LogFormat = "nocolors" + LogFormatJSON LogFormat = "json" +) + +type RepoKind string + +const ( + FS RepoKind = "fs" + SQLite RepoKind = "sqlite" + LevelDb RepoKind = "leveldb" +) + +const defaultBlockSize = 1024 * 64 + +type Config struct { + // Default: INFO + LogLevel string `json:"log-level,omitempty"` + + // Specifies what kind of logs should be written to stdout + // Default: auto + LogFormat LogFormat `json:"log-format,omitempty"` + + // Enable the metrics server + // Default: false + MetricsEnabled bool `json:"metrics,omitempty"` + + // Listening address of the metrics server + // Default: 127.0.0.1 + MetricsAddress string `json:"metrics-address,omitempty"` + + // Listening HTTP port of the metrics server + // Default: 8008 + MetricsPort int `json:"metrics-port,omitempty"` + + // The directory where logos storage will store configuration and data + // Default: + // $HOME\AppData\Roaming\Logos Storage on Windows + // $HOME/Library/Application Support/Logos Storage on macOS + // $HOME/.cache/logos_storage on Linux + DataDir string `json:"data-dir,omitempty"` + + // Multi Addresses to listen on + // Default: ["/ip4/0.0.0.0/tcp/0"] + ListenAddrs []string `json:"listen-addrs,omitempty"` + + // Specify method to use for determining public address. + // Must be one of: any, none, upnp, pmp, extip: + // Default: any + Nat string `json:"nat,omitempty"` + + // Discovery (UDP) port + // Default: 8090 + DiscoveryPort int `json:"disc-port,omitempty"` + + // Source of network (secp256k1) private key file path or name + // Default: "key" + NetPrivKeyFile string `json:"net-privkey,omitempty"` + + // Specifies one or more bootstrap nodes to use when connecting to the network. + BootstrapNodes []string `json:"bootstrap-node,omitempty"` + + // The maximum number of peers to connect to. 
+ // Default: 160 + MaxPeers int `json:"max-peers,omitempty"` + + // Number of worker threads (\"0\" = use as many threads as there are CPU cores available) + // Default: 0 + NumThreads int `json:"num-threads,omitempty"` + + // Node agent string which is used as identifier in network + // Default: "Logos Storage" + AgentString string `json:"agent-string,omitempty"` + + // Backend for main repo store (fs, sqlite, leveldb) + // Default: fs + RepoKind RepoKind `json:"repo-kind,omitempty"` + + // The size of the total storage quota dedicated to the node + // Default: 20 GiBs + StorageQuota int `json:"storage-quota,omitempty"` + + // Default block timeout in seconds - 0 disables the ttl + // Default: 30 days + BlockTtl int `json:"block-ttl,omitempty"` + + // Time interval in seconds - determines frequency of block + // maintenance cycle: how often blocks are checked for expiration and cleanup + // Default: 10 minutes + BlockMaintenanceInterval int `json:"block-mi,omitempty"` + + // Number of blocks to check every maintenance cycle + // Default: 1000 + BlockMaintenanceNumberOfBlocks int `json:"block-mn,omitempty"` + + // Number of times to retry fetching a block before giving up + // Default: 3000 + BlockRetries int `json:"block-retries,omitempty"` + + // The size of the block cache, 0 disables the cache - + // might help on slow hardrives + // Default: 0 + CacheSize int `json:"cache-size,omitempty"` + + // Default: "" (no log file) + LogFile string `json:"log-file,omitempty"` +} + +type StorageNode struct { + ctx unsafe.Pointer +} + +type ChunkSize int + +func (c ChunkSize) valOrDefault() int { + if c == 0 { + return defaultBlockSize + } + + return int(c) +} + +func (c ChunkSize) toSizeT() C.size_t { + return C.size_t(c.valOrDefault()) +} + +// bridgeCtx is used for managing the C-Go bridge calls. +// It contains a wait group for synchronizing the calls, +// a cgo.Handle for passing context to the C code, +// a response pointer for receiving data from the C code, +// and fields for storing the result and error of the call. +type bridgeCtx struct { + wg *sync.WaitGroup + h cgo.Handle + resp unsafe.Pointer + result string + err error + + // Callback used for receiving progress updates during upload/download. + // + // For the upload, the bytes parameter indicates the number of bytes uploaded. + // If the chunk size is superior or equal to the blocksize (passed in init function), + // the callback will be called when a block is put in the store. + // Otherwise, it will be called when a chunk is pushed into the stream. + // + // For the download, the bytes is the size of the chunk received, and the chunk + // is the actual chunk of data received. + onProgress func(bytes int, chunk []byte) +} + +// newBridgeCtx creates a new bridge context for managing C-Go calls. +// The bridge context is initialized with a wait group and a cgo.Handle. +func newBridgeCtx() *bridgeCtx { + bridge := &bridgeCtx{} + bridge.wg = &sync.WaitGroup{} + bridge.wg.Add(1) + bridge.h = cgo.NewHandle(bridge) + bridge.resp = C.allocResp(C.uintptr_t(uintptr(bridge.h))) + return bridge +} + +// callError creates an error message for a failed C-Go call. +func (b *bridgeCtx) callError(name string) error { + return fmt.Errorf("failed the call to %s returned code %d", name, C.getRet(b.resp)) +} + +// free releases the resources associated with the bridge context, +// including the cgo.Handle and the response pointer. 
+func (b *bridgeCtx) free() { + if b.h > 0 { + b.h.Delete() + b.h = 0 + } + + if b.resp != nil { + C.freeResp(b.resp) + b.resp = nil + } +} + +// callback is the function called by the C code to communicate back to Go. +// It handles progress updates, successful completions, and errors. +// The function uses the response pointer to retrieve the bridge context +// and update its state accordingly. +// +//export callback +func callback(ret C.int, msg *C.char, len C.size_t, resp unsafe.Pointer) { + if resp == nil { + return + } + + m := (*C.Resp)(resp) + m.ret = ret + m.msg = msg + m.len = len + + if m.h == 0 { + return + } + + h := cgo.Handle(m.h) + if h == 0 { + return + } + + if v, ok := h.Value().(*bridgeCtx); ok { + switch ret { + case C.RET_PROGRESS: + if v.onProgress == nil { + return + } + if msg != nil { + chunk := C.GoBytes(unsafe.Pointer(msg), C.int(len)) + v.onProgress(int(C.int(len)), chunk) + } else { + v.onProgress(int(C.int(len)), nil) + } + case C.RET_OK: + retMsg := C.GoStringN(msg, C.int(len)) + v.result = retMsg + v.err = nil + if v.wg != nil { + v.wg.Done() + } + case C.RET_ERR: + retMsg := C.GoStringN(msg, C.int(len)) + v.err = errors.New(retMsg) + if v.wg != nil { + v.wg.Done() + } + } + } +} + +// wait waits for the bridge context to complete its operation. +// It returns the result and error of the operation. +func (b *bridgeCtx) wait() (string, error) { + b.wg.Wait() + return b.result, b.err +} + +type OnUploadProgressFunc func(read, total int, percent float64, err error) + +type UploadOptions struct { + // Filepath can be the full path when using UploadFile + // otherwise the file name. + // It is used to detect the mimetype. + Filepath string + + // ChunkSize is the size of each upload chunk, passed as `blockSize` to the Logos Storage node + // store. Default is to 64 KB. + ChunkSize ChunkSize + + // OnProgress is a callback function that is called after each chunk is uploaded with: + // - read: the number of bytes read in the last chunk. + // - total: the total number of bytes read so far. + // - percent: the percentage of the total file size that has been uploaded. It is + // determined from a `stat` call if it is a file and from the length of the buffer + // if it is a buffer. Otherwise, it is 0. + // - err: an error, if one occurred. + // + // If the chunk size is more than the `chunkSize` parameter, the callback is called + // after the block is actually stored in the block store. Otherwise, it is called + // after the chunk is sent to the stream. + OnProgress OnUploadProgressFunc +} + +func getReaderSize(r io.Reader) int64 { + switch v := r.(type) { + case *os.File: + stat, err := v.Stat() + if err != nil { + return 0 + } + return stat.Size() + case *bytes.Buffer: + return int64(v.Len()) + default: + return 0 + } +} + +// New creates a new Logos Storage node with the provided configuration. +// The node is not started automatically; you need to call StorageStart +// to start it. +// It returns a Logos Storage node that can be used to interact +// with the Logos Storage network. 
+func New(config Config) (*StorageNode, error) { + bridge := newBridgeCtx() + defer bridge.free() + + jsonConfig, err := json.Marshal(config) + if err != nil { + return nil, err + } + + cJsonConfig := C.CString(string(jsonConfig)) + defer C.free(unsafe.Pointer(cJsonConfig)) + + ctx := C.cGoStorageNew(cJsonConfig, bridge.resp) + + if _, err := bridge.wait(); err != nil { + return nil, bridge.err + } + + return &StorageNode{ctx: ctx}, bridge.err +} + +// Start starts the Logos Storage node. +func (node StorageNode) Start() error { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageStart(node.ctx, bridge.resp) != C.RET_OK { + return bridge.callError("cGoStorageStart") + } + + _, err := bridge.wait() + return err +} + +// StartAsync is the asynchronous version of Start. +func (node StorageNode) StartAsync(onDone func(error)) { + go func() { + err := node.Start() + onDone(err) + }() +} + +// Stop stops the Logos Storage node. +func (node StorageNode) Stop() error { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageStop(node.ctx, bridge.resp) != C.RET_OK { + return bridge.callError("cGoStorageStop") + } + + _, err := bridge.wait() + return err +} + +// Destroy destroys the Logos Storage node, freeing all resources. +// The node must be stopped before calling this method. +func (node StorageNode) Destroy() error { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageClose(node.ctx, bridge.resp) != C.RET_OK { + return bridge.callError("cGoStorageClose") + } + + _, err := bridge.wait() + if err != nil { + return err + } + + if C.cGoStorageDestroy(node.ctx, bridge.resp) != C.RET_OK { + return errors.New("Failed to destroy the Logos Storage node.") + } + + return err +} + +// Version returns the version of the Logos Storage node. +func (node StorageNode) Version() (string, error) { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageVersion(node.ctx, bridge.resp) != C.RET_OK { + return "", bridge.callError("cGoStorageVersion") + } + + return bridge.wait() +} + +func (node StorageNode) Revision() (string, error) { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageRevision(node.ctx, bridge.resp) != C.RET_OK { + return "", bridge.callError("cGoStorageRevision") + } + + return bridge.wait() +} + +// Repo returns the path of the data dir folder. +func (node StorageNode) Repo() (string, error) { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageRepo(node.ctx, bridge.resp) != C.RET_OK { + return "", bridge.callError("cGoStorageRepo") + } + + return bridge.wait() +} + +func (node StorageNode) Spr() (string, error) { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStorageSpr(node.ctx, bridge.resp) != C.RET_OK { + return "", bridge.callError("cGoStorageSpr") + } + + return bridge.wait() +} + +func (node StorageNode) PeerId() (string, error) { + bridge := newBridgeCtx() + defer bridge.free() + + if C.cGoStoragePeerId(node.ctx, bridge.resp) != C.RET_OK { + return "", bridge.callError("cGoStoragePeerId") + } + + return bridge.wait() +} + +// UploadInit initializes a new upload session. +// It returns a session ID that can be used for subsequent upload operations. +// This function is called by UploadReader and UploadFile internally. +// You should use this function only if you need to manage the upload session manually. 
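Taken together, the wrappers above cover the full node lifecycle. A sketch of a minimal consumer program built against these bindings (config values illustrative; the deferred `Stop` runs before `Destroy`):

```go
package main

import "log"

func main() {
	node, err := New(Config{DataDir: "./data", LogLevel: "INFO"})
	if err != nil {
		log.Fatal(err)
	}
	defer node.Destroy()

	if err := node.Start(); err != nil {
		log.Fatal(err)
	}
	defer node.Stop()

	version, err := node.Version()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("node version:", version)
}
```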
+func (node StorageNode) UploadInit(options *UploadOptions) (string, error) {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	var cFilename = C.CString(options.Filepath)
+	defer C.free(unsafe.Pointer(cFilename))
+
+	if C.cGoStorageUploadInit(node.ctx, cFilename, options.ChunkSize.toSizeT(), bridge.resp) != C.RET_OK {
+		return "", bridge.callError("cGoStorageUploadInit")
+	}
+
+	return bridge.wait()
+}
+
+// UploadChunk uploads a chunk of data to the Logos Storage node.
+// It takes the session ID returned by UploadInit
+// and a byte slice containing the chunk data.
+// This function is called by UploadReader internally.
+// You should use this function only if you need to manage the upload session manually.
+func (node StorageNode) UploadChunk(sessionId string, chunk []byte) error {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	var cSessionId = C.CString(sessionId)
+	defer C.free(unsafe.Pointer(cSessionId))
+
+	var cChunkPtr *C.uint8_t
+	if len(chunk) > 0 {
+		cChunkPtr = (*C.uint8_t)(unsafe.Pointer(&chunk[0]))
+	}
+
+	if C.cGoStorageUploadChunk(node.ctx, cSessionId, cChunkPtr, C.size_t(len(chunk)), bridge.resp) != C.RET_OK {
+		return bridge.callError("cGoStorageUploadChunk")
+	}
+
+	_, err := bridge.wait()
+	return err
+}
+
+// UploadFinalize finalizes the upload session and returns the CID of the uploaded file.
+// It takes the session ID returned by UploadInit.
+// This function is called by UploadReader and UploadFile internally.
+// You should use this function only if you need to manage the upload session manually.
+func (node StorageNode) UploadFinalize(sessionId string) (string, error) {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	var cSessionId = C.CString(sessionId)
+	defer C.free(unsafe.Pointer(cSessionId))
+
+	if C.cGoStorageUploadFinalize(node.ctx, cSessionId, bridge.resp) != C.RET_OK {
+		return "", bridge.callError("cGoStorageUploadFinalize")
+	}
+
+	return bridge.wait()
+}
+
+// UploadCancel cancels an ongoing upload session.
+// It can be used only if the upload session is managed manually.
+// It doesn't work with UploadFile.
+func (node StorageNode) UploadCancel(sessionId string) error {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	var cSessionId = C.CString(sessionId)
+	defer C.free(unsafe.Pointer(cSessionId))
+
+	if C.cGoStorageUploadCancel(node.ctx, cSessionId, bridge.resp) != C.RET_OK {
+		return bridge.callError("cGoStorageUploadCancel")
+	}
+
+	_, err := bridge.wait()
+	return err
+}
+
+// UploadReader uploads data from an io.Reader to the Logos Storage node.
+// It takes the upload options and the reader as parameters.
+// It returns the CID of the uploaded file or an error.
+//
+// Internally, it calls:
+// - UploadInit to create the upload session.
+// - UploadChunk to upload a chunk to Logos Storage.
+// - UploadFinalize to finalize the upload session.
+// - UploadCancel if an error occurs.
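+//
+// A hedged usage sketch with progress reporting (the file name is
+// illustrative):
+//
+//	f, err := os.Open("data.bin")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//
+//	cid, err := node.UploadReader(UploadOptions{
+//		Filepath: "data.bin",
+//		OnProgress: func(read, total int, percent float64, err error) {
+//			log.Printf("uploaded %d bytes (%.1f%%)", total, percent)
+//		},
+//	}, f)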
+func (node StorageNode) UploadReader(options UploadOptions, r io.Reader) (string, error) {
+	sessionId, err := node.UploadInit(&options)
+	if err != nil {
+		return "", err
+	}
+
+	buf := make([]byte, options.ChunkSize.valOrDefault())
+	total := 0
+
+	var size int64
+	if options.OnProgress != nil {
+		size = getReaderSize(r)
+	}
+
+	for {
+		n, err := r.Read(buf)
+
+		// Per the io.Reader contract, process the n > 0 bytes returned
+		// before considering the error, including io.EOF.
+		if n > 0 {
+			if err := node.UploadChunk(sessionId, buf[:n]); err != nil {
+				if cancelErr := node.UploadCancel(sessionId); cancelErr != nil {
+					return "", fmt.Errorf("failed to upload chunk %v and failed to cancel upload session %v", err, cancelErr)
+				}
+
+				return "", err
+			}
+
+			total += n
+			if options.OnProgress != nil && size > 0 {
+				percent := float64(total) / float64(size) * 100.0
+				// The last block could be a bit over the size due to padding
+				// on the chunk size.
+				if percent > 100.0 {
+					percent = 100.0
+				}
+				options.OnProgress(n, total, percent, nil)
+			} else if options.OnProgress != nil {
+				options.OnProgress(n, total, 0, nil)
+			}
+		}
+
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			if cancelErr := node.UploadCancel(sessionId); cancelErr != nil {
+				return "", fmt.Errorf("failed to read chunk %v and failed to cancel upload session %v", err, cancelErr)
+			}
+
+			return "", err
+		}
+
+		if n == 0 {
+			break
+		}
+	}
+
+	return node.UploadFinalize(sessionId)
+}
+
+// UploadReaderAsync is the asynchronous version of UploadReader using a goroutine.
+func (node StorageNode) UploadReaderAsync(options UploadOptions, r io.Reader, onDone func(cid string, err error)) {
+	go func() {
+		cid, err := node.UploadReader(options, r)
+		onDone(cid, err)
+	}()
+}
+
+// UploadFile uploads a file to the Logos Storage node.
+// It takes the upload options as a parameter.
+// It returns the CID of the uploaded file or an error.
+//
+// The options parameter contains the following fields:
+// - filepath: the full path of the file to upload.
+// - chunkSize: the size of each upload chunk, passed as `blockSize` to the Logos Storage node
+//   store. Defaults to 64 KB.
+// - onProgress: a callback function that is called after each chunk is uploaded with:
+//   - read: the number of bytes read in the last chunk.
+//   - total: the total number of bytes read so far.
+//   - percent: the percentage of the total file size that has been uploaded. It is
+//     determined from a `stat` call.
+//   - err: an error, if one occurred.
+//
+// If a chunk is larger than the `chunkSize` parameter, the callback is called after
+// the block is actually stored in the block store. Otherwise, it is called after the chunk
+// is sent to the stream.
+//
+// Internally, it calls UploadInit to create the upload session.
+func (node StorageNode) UploadFile(options UploadOptions) (string, error) {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	if options.OnProgress != nil {
+		stat, err := os.Stat(options.Filepath)
+		if err != nil {
+			return "", err
+		}
+
+		size := stat.Size()
+		total := 0
+
+		if size > 0 {
+			bridge.onProgress = func(read int, _ []byte) {
+				if read == 0 {
+					return
+				}
+
+				total += read
+				percent := float64(total) / float64(size) * 100.0
+				// The last block could be a bit over the size due to padding
+				// on the chunk size.
+				if percent > 100.0 {
+					percent = 100.0
+				}
+
+				options.OnProgress(read, int(size), percent, nil)
+			}
+		}
+	}
+
+	sessionId, err := node.UploadInit(&options)
+	if err != nil {
+		return "", err
+	}
+
+	var cSessionId = C.CString(sessionId)
+	defer C.free(unsafe.Pointer(cSessionId))
+
+	if C.cGoStorageUploadFile(node.ctx, cSessionId, bridge.resp) != C.RET_OK {
+		return "", bridge.callError("cGoStorageUploadFile")
+	}
+
+	return bridge.wait()
+}
+
+// UploadFileAsync is the asynchronous version of UploadFile using a goroutine.
+func (node StorageNode) UploadFileAsync(options UploadOptions, onDone func(cid string, err error)) {
+	go func() {
+		cid, err := node.UploadFile(options)
+		onDone(cid, err)
+	}()
+}
+
+func (node StorageNode) UpdateLogLevel(logLevel string) error {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	var cLogLevel = C.CString(logLevel)
+	defer C.free(unsafe.Pointer(cLogLevel))
+
+	if C.cGoStorageLogLevel(node.ctx, cLogLevel, bridge.resp) != C.RET_OK {
+		return bridge.callError("cGoStorageLogLevel")
+	}
+
+	_, err := bridge.wait()
+	return err
+}
+
+func (node StorageNode) Exists(cid string) (bool, error) {
+	bridge := newBridgeCtx()
+	defer bridge.free()
+
+	var cCid = C.CString(cid)
+	defer C.free(unsafe.Pointer(cCid))
+
+	if C.cGoStorageExists(node.ctx, cCid, bridge.resp) != C.RET_OK {
+		return false, bridge.callError("cGoStorageExists")
+	}
+
+	result, err := bridge.wait()
+	return result == "true", err
+}
+
+func main() {
+	dataDir := os.TempDir() + "/data-dir"
+
+	node, err := New(Config{
+		BlockRetries: 5,
+		LogLevel:     "WARN",
+		DataDir:      dataDir,
+	})
+	if err != nil {
+		log.Fatalf("Failed to create Logos Storage node: %v", err)
+	}
+	defer os.RemoveAll(dataDir)
+
+	if err := node.Start(); err != nil {
+		log.Fatalf("Failed to start Logos Storage node: %v", err)
+	}
+	log.Println("Logos Storage node started")
+
+	version, err := node.Version()
+	if err != nil {
+		log.Fatalf("Failed to get Logos Storage version: %v", err)
+	}
+	log.Printf("Logos Storage version: %s", version)
+
+	err = node.UpdateLogLevel("ERROR")
+	if err != nil {
+		log.Fatalf("Failed to update log level: %v", err)
+	}
+
+	cid := "zDvZRwzmAkhzDRPH5EW242gJBNZ2T7aoH2v1fVH66FxXL4kSbvyM"
+	exists, err := node.Exists(cid)
+	if err != nil {
+		log.Fatalf("Failed to check data existence: %v", err)
+	}
+
+	if exists {
+		log.Fatalf("The data should not exist")
+	}
+
+	buf := bytes.NewBuffer([]byte("Hello World!"))
+	size := buf.Len()
+	cid, err = node.UploadReader(UploadOptions{Filepath: "hello.txt"}, buf)
+	if err != nil {
+		log.Fatalf("Failed to upload data: %v", err)
+	}
+	log.Printf("Uploaded data with CID: %s (size: %d bytes)", cid, size)
+
+	exists, err = node.Exists(cid)
+	if err != nil {
+		log.Fatalf("Failed to check data existence: %v", err)
+	}
+
+	if !exists {
+		log.Fatalf("The data should exist")
+	}
+
+	// Wait for a SIGINT or SIGTERM signal
+	ch := make(chan os.Signal, 1)
+	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
+	<-ch
+
+	if err := node.Stop(); err != nil {
+		log.Fatalf("Failed to stop Logos Storage node: %v", err)
+	}
+	log.Println("Logos Storage node stopped")
+
+	if err := node.Destroy(); err != nil {
+		log.Fatalf("Failed to destroy Logos Storage node: %v", err)
+	}
+}
diff --git a/flake.lock b/flake.lock
index decca52e..5525546b 100644
--- a/flake.lock
+++ b/flake.lock
@@ -9,13 +9,13 @@
       "locked": {
         "lastModified": 1736521871,
         "narHash": "sha256-d34XNLg9NGPEOARHW+BIOAWalkHdEUAwsv3mpLZQxds=",
-        "owner": "codex-storage",
+        "owner": "logos-storage",
        "repo":
"circom-compat-ffi", "rev": "8cd4ed44fdafe59d4ec1184420639cae4c4dbab9", "type": "github" }, "original": { - "owner": "codex-storage", + "owner": "logos-storage", "repo": "circom-compat-ffi", "type": "github" } diff --git a/flake.nix b/flake.nix index 4302b3d2..8b615d46 100644 --- a/flake.nix +++ b/flake.nix @@ -1,10 +1,10 @@ { - description = "Nim Codex build flake"; + description = "Logos Storage build flake"; inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; circom-compat = { - url = "github:codex-storage/circom-compat-ffi"; + url = "github:logos-storage/circom-compat-ffi"; inputs.nixpkgs.follows = "nixpkgs"; }; }; @@ -26,11 +26,12 @@ }; build = targets: buildTarget.override { inherit targets; }; in rec { - nim-codex = build ["all"]; - default = nim-codex; + logos-storage-nim = build ["all"]; + libstorage = build ["libstorage"]; + default = logos-storage-nim; }); - nixosModules.nim-codex = { config, lib, pkgs, ... }: import ./nix/service.nix { + nixosModules.logos-storage-nim = { config, lib, pkgs, ... }: import ./nix/service.nix { inherit config lib pkgs self; circomCompatPkg = circom-compat.packages.${pkgs.system}.default; }; @@ -40,7 +41,8 @@ in { default = pkgs.mkShell { inputsFrom = [ - packages.${system}.nim-codex + packages.${system}.logos-storage-nim + packages.${system}.libstorage circom-compat.packages.${system}.default ]; # Not using buildInputs to override fakeGit and fakeCargo. @@ -51,24 +53,24 @@ checks = forAllSystems (system: let pkgs = pkgsFor.${system}; in { - nim-codex-test = pkgs.nixosTest { - name = "nim-codex-test"; + logos-storage-nim-test = pkgs.nixosTest { + name = "logos-storage-nim-test"; nodes = { server = { config, pkgs, ... }: { - imports = [ self.nixosModules.nim-codex ]; - services.nim-codex.enable = true; - services.nim-codex.settings = { - data-dir = "/var/lib/nim-codex-test"; + imports = [ self.nixosModules.logos-storage-nim ]; + services.logos-storage-nim.enable = true; + services.logos-storage-nim.settings = { + data-dir = "/var/lib/logos-storage-nim-test"; }; - systemd.services.nim-codex.serviceConfig.StateDirectory = "nim-codex-test"; + systemd.services.logos-storage-nim.serviceConfig.StateDirectory = "logos-storage-nim-test"; }; }; testScript = '' - print("Starting test: nim-codex-test") + print("Starting test: logos-storage-nim-test") machine.start() - machine.wait_for_unit("nim-codex.service") - machine.succeed("test -d /var/lib/nim-codex-test") - machine.wait_until_succeeds("journalctl -u nim-codex.service | grep 'Started codex node'", 10) + machine.wait_for_unit("logos-storage-nim.service") + machine.succeed("test -d /var/lib/logos-storage-nim-test") + machine.wait_until_succeeds("journalctl -u logos-storage-nim.service | grep 'Started Storage node'", 10) ''; }; }); diff --git a/library/README.md b/library/README.md new file mode 100644 index 00000000..655cd9c8 --- /dev/null +++ b/library/README.md @@ -0,0 +1,37 @@ +# Logos Storage Library + +Logos Storage exposes a C binding that serves as a stable contract, making it straightforward to integrate Logos Storage into other languages such as Go. + +The implementation was inspired by [nim-library-template](https://github.com/logos-co/nim-library-template) +and by the [nwaku](https://github.com/waku-org/nwaku/tree/master/library) library. + +The source code contains detailed comments to explain the threading and callback flow. +The diagram below summarizes the lifecycle: context creation, request execution, and shutdown. 
+
+```mermaid
+sequenceDiagram
+    autonumber
+    actor App as App/User
+    participant Go as Go Wrapper
+    participant C as C API (libstorage.h)
+    participant Ctx as StorageContext
+    participant Thr as Worker Thread
+    participant Eng as CodexServer
+
+    App->>Go: Start
+    Go->>C: storage_start_node
+    C->>Ctx: enqueue request
+    C->>Ctx: fire signal
+    Ctx->>Thr: wake worker
+    Thr->>Ctx: dequeue request
+    Thr-->>Ctx: ACK
+    Ctx-->>C: forward ACK
+    C-->>Go: RET OK
+    Go->>App: Unblock
+    Thr->>Eng: execute (async)
+    Eng-->>Thr: result ready
+    Thr-->>Ctx: callback
+    Ctx-->>C: forward callback
+    C-->>Go: forward callback
+    Go-->>App: done
+```
\ No newline at end of file
diff --git a/library/alloc.nim b/library/alloc.nim
new file mode 100644
index 00000000..1a6f118b
--- /dev/null
+++ b/library/alloc.nim
@@ -0,0 +1,42 @@
+## Can be shared safely between threads
+type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int]
+
+proc alloc*(str: cstring): cstring =
+  # Byte allocation from the given address.
+  # There must be a corresponding manual deallocation with deallocShared!
+  if str.isNil():
+    var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator
+    ret[0] = '\0' # Set the null terminator
+    return ret
+
+  let ret = cast[cstring](allocShared(len(str) + 1))
+  copyMem(ret, str, len(str) + 1)
+  return ret
+
+proc alloc*(str: string): cstring =
+  ## Byte allocation from the given address.
+  ## There must be a corresponding manual deallocation with deallocShared!
+  var ret = cast[cstring](allocShared(str.len + 1))
+  let s = cast[seq[char]](str)
+  for i in 0 ..< str.len:
+    ret[i] = s[i]
+  ret[str.len] = '\0'
+  return ret
+
+proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] =
+  let data = allocShared(sizeof(T) * s.len)
+  if s.len != 0:
+    # Copy the full payload: s.len elements of sizeof(T) bytes each.
+    copyMem(data, unsafeAddr s[0], sizeof(T) * s.len)
+  return (cast[ptr UncheckedArray[T]](data), s.len)
+
+proc deallocSharedSeq*[T](s: var SharedSeq[T]) =
+  deallocShared(s.data)
+  s.len = 0
+
+proc toSeq*[T](s: SharedSeq[T]): seq[T] =
+  ## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
+  ## as seq[T] is a GC-managed type.
+  var ret = newSeq[T]()
+  for i in 0 ..< s.len:
+    ret.add(s.data[i])
+  return ret
diff --git a/library/events/json_base_event.nim b/library/events/json_base_event.nim
new file mode 100644
index 00000000..743444ed
--- /dev/null
+++ b/library/events/json_base_event.nim
@@ -0,0 +1,14 @@
+# JSON Event definition
+#
+# This file defines the JsonEvent type, which serves as the base
+# for all event types in the library.
+#
+# Reference specification:
+# https://github.com/vacp2p/rfc/blob/master/content/docs/rfcs/36/README.md#jsonsignal-type
+
+type JsonEvent* = ref object of RootObj
+  eventType* {.requiresInit.}: string
+
+method `$`*(jsonEvent: JsonEvent): string {.base.} =
+  discard
+  # All events should implement this
diff --git a/library/ffi_types.nim b/library/ffi_types.nim
new file mode 100644
index 00000000..38faf551
--- /dev/null
+++ b/library/ffi_types.nim
@@ -0,0 +1,62 @@
+# FFI Types and Utilities
+#
+# This file defines the core types and utilities for the library's foreign
+# function interface (FFI), enabling interoperability with external code.
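+#
+# In short (a summary of the contract implemented below): every exported proc
+# returns an acknowledgment code synchronously, and the registered
+# StorageCallback later delivers the actual result (RET_OK / RET_ERR) or
+# intermediate RET_PROGRESS notifications.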
+ +################################################################################ +### Exported types +import results + +type StorageCallback* = proc( + callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer +) {.cdecl, gcsafe, raises: [].} + +const RET_OK*: cint = 0 +const RET_ERR*: cint = 1 +const RET_MISSING_CALLBACK*: cint = 2 +const RET_PROGRESS*: cint = 3 + +## Returns RET_OK as acknowledgment and call the callback +## with RET_OK code and the provided message. +proc success*(callback: StorageCallback, msg: string, userData: pointer): cint = + callback(RET_OK, cast[ptr cchar](msg), cast[csize_t](len(msg)), userData) + + return RET_OK + +## Returns RET_ERR as acknowledgment and call the callback +## with RET_ERR code and the provided message. +proc error*(callback: StorageCallback, msg: string, userData: pointer): cint = + let msg = "libstorage error: " & msg + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + + return RET_ERR + +## Returns RET_OK as acknowledgment if the result is ok. +## If not, return RET_ERR and call the callback with the error message. +proc okOrError*[T]( + callback: StorageCallback, res: Result[T, string], userData: pointer +): cint = + if res.isOk: + return RET_OK + + return callback.error($res.error, userData) + +### End of exported types +################################################################################ + +################################################################################ +### FFI utils + +template foreignThreadGc*(body: untyped) = + when declared(setupForeignThreadGc): + setupForeignThreadGc() + + body + + when declared(tearDownForeignThreadGc): + tearDownForeignThreadGc() + +type onDone* = proc() + +### End of FFI utils +################################################################################ diff --git a/library/libstorage.h b/library/libstorage.h new file mode 100644 index 00000000..76d5e2e1 --- /dev/null +++ b/library/libstorage.h @@ -0,0 +1,206 @@ +/** +* libstorage.h - C Interface for Example Library +* +* This header provides the public API for libstorage +* +* To see the auto-generated header by Nim, run `make libstorage` from the +* repository root. 
The generated file will be created at: +* nimcache/release/libstorage/libstorage.h +*/ + +#ifndef __libstorage__ +#define __libstorage__ + +#include +#include + +// The possible returned values for the functions that return int +#define RET_OK 0 +#define RET_ERR 1 +#define RET_MISSING_CALLBACK 2 +#define RET_PROGRESS 3 + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*StorageCallback) (int callerRet, const char* msg, size_t len, void* userData); + +void* storage_new( + const char* configJson, + StorageCallback callback, + void* userData); + +int storage_version( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_revision( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_repo( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_debug( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_spr( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_peer_id( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_log_level( + void* ctx, + const char* logLevel, + StorageCallback callback, + void* userData); + +int storage_connect( + void* ctx, + const char* peerId, + const char** peerAddresses, + size_t peerAddressesSize, + StorageCallback callback, + void* userData); + +int storage_peer_debug( + void* ctx, + const char* peerId, + StorageCallback callback, + void* userData); + + +int storage_upload_init( + void* ctx, + const char* filepath, + size_t chunkSize, + StorageCallback callback, + void* userData); + +int storage_upload_chunk( + void* ctx, + const char* sessionId, + const uint8_t* chunk, + size_t len, + StorageCallback callback, + void* userData); + +int storage_upload_finalize( + void* ctx, + const char* sessionId, + StorageCallback callback, + void* userData); + +int storage_upload_cancel( + void* ctx, + const char* sessionId, + StorageCallback callback, + void* userData); + +int storage_upload_file( + void* ctx, + const char* sessionId, + StorageCallback callback, + void* userData); + +int storage_download_stream( + void* ctx, + const char* cid, + size_t chunkSize, + bool local, + const char* filepath, + StorageCallback callback, + void* userData); + +int storage_download_init( + void* ctx, + const char* cid, + size_t chunkSize, + bool local, + StorageCallback callback, + void* userData); + +int storage_download_chunk( + void* ctx, + const char* cid, + StorageCallback callback, + void* userData); + +int storage_download_cancel( + void* ctx, + const char* cid, + StorageCallback callback, + void* userData); + +int storage_download_manifest( + void* ctx, + const char* cid, + StorageCallback callback, + void* userData); + +int storage_list( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_space( + void* ctx, + StorageCallback callback, + void* userData); + +int storage_delete( + void* ctx, + const char* cid, + StorageCallback callback, + void* userData); + +int storage_fetch( + void* ctx, + const char* cid, + StorageCallback callback, + void* userData); + +int storage_exists( + void* ctx, + const char* cid, + StorageCallback callback, + void* userData); + +int storage_start(void* ctx, + StorageCallback callback, + void* userData); + +int storage_stop(void* ctx, + StorageCallback callback, + void* userData); + +int storage_close(void* ctx, + StorageCallback callback, + void* userData); + +// Destroys an instance of a Logos Storage node created with storage_new +int storage_destroy(void* ctx, + StorageCallback callback, + 
            void* userData);
+
+void storage_set_event_callback(void* ctx,
+    StorageCallback callback,
+    void* userData);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __libstorage__ */
\ No newline at end of file
diff --git a/library/libstorage.nim b/library/libstorage.nim
new file mode 100644
index 00000000..06b02172
--- /dev/null
+++ b/library/libstorage.nim
@@ -0,0 +1,571 @@
+# libstorage.nim - C-exported interface for the Storage shared library
+#
+# This file implements the public C API for libstorage.
+# It acts as the bridge between C programs and the internal Nim implementation.
+#
+# This file defines:
+# - Initialization logic for the Nim runtime (once per process)
+# - Thread-safe exported procs callable from C
+# - Callback registration and invocation for asynchronous communication
+
+# cdecl is the C declaration calling convention.
+# It’s the standard way C compilers expect functions to behave:
+# 1- Caller cleans up the stack after the call
+# 2- Symbol names are exported in a predictable way
+# In other terms, it is the glue that makes Nim functions callable as normal C functions.
+{.pragma: exported, exportc, cdecl, raises: [].}
+{.pragma: callback, cdecl, raises: [], gcsafe.}
+
+# Ensure code is position-independent so it can be built into a shared library (.so).
+# In other terms, code that can run no matter where it’s placed in memory.
+{.passc: "-fPIC".}
+
+when defined(linux):
+  # Define the canonical name for this library
+  {.passl: "-Wl,-soname,libstorage.so".}
+
+import std/[atomics]
+import chronicles
+import chronos
+import chronos/threadsync
+import ./storage_context
+import ./storage_thread_requests/storage_thread_request
+import ./storage_thread_requests/requests/node_lifecycle_request
+import ./storage_thread_requests/requests/node_info_request
+import ./storage_thread_requests/requests/node_debug_request
+import ./storage_thread_requests/requests/node_p2p_request
+import ./storage_thread_requests/requests/node_upload_request
+import ./storage_thread_requests/requests/node_download_request
+import ./storage_thread_requests/requests/node_storage_request
+import ./ffi_types
+
+from ../codex/conf import codexVersion
+
+logScope:
+  topics = "libstorage"
+
+template checkLibstorageParams*(
+    ctx: ptr StorageContext, callback: StorageCallback, userData: pointer
+) =
+  if not isNil(ctx):
+    ctx[].userData = userData
+
+  if isNil(callback):
+    return RET_MISSING_CALLBACK
+
+# From Nim doc:
+# "the C targets require you to initialize Nim's internals, which is done calling a NimMain function."
+# "The name NimMain can be influenced via the --nimMainPrefix:prefix switch."
+# "Use --nimMainPrefix:MyLib and the function to call is named MyLibNimMain."
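+#
+# Here the prefix is evidently `libstorage` (see the importc proc below), so
+# the entry point the runtime initialization calls is `libstorageNimMain`.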
+proc libstorageNimMain() {.importc.} + +# Atomic flag to prevent multiple initializations +var initialized: Atomic[bool] + +if defined(android): + # Redirect chronicles to Android System logs + when compiles(defaultChroniclesStream.outputs[0].writer): + defaultChroniclesStream.outputs[0].writer = proc( + logLevel: LogLevel, msg: LogOutputStr + ) {.raises: [].} = + echo logLevel, msg + +# Initializes the Nim runtime and foreign-thread GC +proc initializeLibrary() {.exported.} = + if not initialized.exchange(true): + ## Every Nim library must call `NimMain()` once + libstorageNimMain() + when declared(setupForeignThreadGc): + setupForeignThreadGc() + when declared(nimGC_setStackBottom): + var locals {.volatile, noinit.}: pointer + locals = addr(locals) + nimGC_setStackBottom(locals) + +proc storage_new( + configJson: cstring, callback: StorageCallback, userData: pointer +): pointer {.dynlib, exported.} = + initializeLibrary() + + if isNil(callback): + error "Failed to create Storage instance: the callback is missing." + return nil + + var ctx = storage_context.createStorageContext().valueOr: + let msg = $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return nil + + ctx.userData = userData + + let reqContent = + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE_NODE, configJson) + + storage_context.sendRequestToStorageThread( + ctx, RequestType.LIFECYCLE, reqContent, callback, userData + ).isOkOr: + let msg = $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return nil + + return ctx + +proc storage_version( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + callback( + RET_OK, + cast[ptr cchar](conf.codexVersion), + cast[csize_t](len(conf.codexVersion)), + userData, + ) + + return RET_OK + +proc storage_revision( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + callback( + RET_OK, + cast[ptr cchar](conf.codexRevision), + cast[csize_t](len(conf.codexRevision)), + userData, + ) + + return RET_OK + +proc storage_repo( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeInfoRequest.createShared(NodeInfoMsgType.REPO) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.INFO, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_debug( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeDebugRequest.createShared(NodeDebugMsgType.DEBUG) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DEBUG, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_spr( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeInfoRequest.createShared(NodeInfoMsgType.SPR) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.INFO, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + 
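+# Note: storage_version and storage_revision above invoke the callback
+# synchronously with values baked into the binary, while getters such as
+# storage_repo, storage_spr and storage_peer_id round-trip through the
+# Logos Storage worker thread before the callback delivers the result.
+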
+proc storage_peer_id( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeInfoRequest.createShared(NodeInfoMsgType.PEERID) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.INFO, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +## Set the log level of the library at runtime. +## It uses updateLogLevel which is a synchronous proc and +## cannot be used inside an async context because of gcsafe issue. +proc storage_log_level( + ctx: ptr StorageContext, + logLevel: cstring, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = + NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, logLevel = logLevel) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DEBUG, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_connect( + ctx: ptr StorageContext, + peerId: cstring, + peerAddressesPtr: ptr cstring, + peerAddressesLength: csize_t, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + var peerAddresses = newSeq[cstring](peerAddressesLength) + let peers = cast[ptr UncheckedArray[cstring]](peerAddressesPtr) + for i in 0 ..< peerAddressesLength: + peerAddresses[i] = peers[i] + + let reqContent = NodeP2PRequest.createShared( + NodeP2PMsgType.CONNECT, peerId = peerId, peerAddresses = peerAddresses + ) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.P2P, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_peer_debug( + ctx: ptr StorageContext, + peerId: cstring, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeDebugRequest.createShared(NodeDebugMsgType.PEER, peerId = peerId) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DEBUG, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_close( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CLOSE_NODE) + var res = storage_context.sendRequestToStorageThread( + ctx, RequestType.LIFECYCLE, reqContent, callback, userData + ) + if res.isErr: + return callback.error(res.error, userData) + + return callback.okOrError(res, userData) + +proc storage_destroy( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let res = storage_context.destroyStorageContext(ctx) + if res.isErr: + return RET_ERR + + return RET_OK + +proc storage_upload_init( + ctx: ptr StorageContext, + filepath: cstring, + chunkSize: csize_t, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = NodeUploadRequest.createShared( + NodeUploadMsgType.INIT, filepath = filepath, chunkSize = chunkSize + ) + + let 
res = storage_context.sendRequestToStorageThread( + ctx, RequestType.UPLOAD, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_upload_chunk( + ctx: ptr StorageContext, + sessionId: cstring, + data: ptr byte, + len: csize_t, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let chunk = newSeq[byte](len) + copyMem(addr chunk[0], data, len) + + let reqContent = NodeUploadRequest.createShared( + NodeUploadMsgType.CHUNK, sessionId = sessionId, chunk = chunk + ) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.UPLOAD, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_upload_finalize( + ctx: ptr StorageContext, + sessionId: cstring, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = + NodeUploadRequest.createShared(NodeUploadMsgType.FINALIZE, sessionId = sessionId) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.UPLOAD, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_upload_cancel( + ctx: ptr StorageContext, + sessionId: cstring, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = + NodeUploadRequest.createShared(NodeUploadMsgType.CANCEL, sessionId = sessionId) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.UPLOAD, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_upload_file( + ctx: ptr StorageContext, + sessionId: cstring, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent = + NodeUploadRequest.createShared(NodeUploadMsgType.FILE, sessionId = sessionId) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.UPLOAD, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_download_init( + ctx: ptr StorageContext, + cid: cstring, + chunkSize: csize_t, + local: bool, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeDownloadRequest.createShared( + NodeDownloadMsgType.INIT, cid = cid, chunkSize = chunkSize, local = local + ) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DOWNLOAD, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_download_chunk( + ctx: ptr StorageContext, cid: cstring, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, cid = cid) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DOWNLOAD, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_download_stream( + ctx: ptr StorageContext, + cid: cstring, + chunkSize: csize_t, + local: bool, + filepath: cstring, + callback: StorageCallback, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + 
checkLibstorageParams(ctx, callback, userData) + + let req = NodeDownloadRequest.createShared( + NodeDownloadMsgType.STREAM, + cid = cid, + chunkSize = chunkSize, + local = local, + filepath = filepath, + ) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DOWNLOAD, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_download_cancel( + ctx: ptr StorageContext, cid: cstring, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, cid = cid) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DOWNLOAD, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_download_manifest( + ctx: ptr StorageContext, cid: cstring, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.MANIFEST, cid = cid) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.DOWNLOAD, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_list( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeStorageRequest.createShared(NodeStorageMsgType.LIST) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.STORAGE, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_space( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.STORAGE, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_delete( + ctx: ptr StorageContext, cid: cstring, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeStorageRequest.createShared(NodeStorageMsgType.DELETE, cid = cid) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.STORAGE, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_fetch( + ctx: ptr StorageContext, cid: cstring, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid = cid) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.STORAGE, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_exists( + ctx: ptr StorageContext, cid: cstring, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let req = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid = cid) + + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.STORAGE, req, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_start( + ctx: 
ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent: ptr NodeLifecycleRequest = + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.LIFECYCLE, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_stop( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibstorageParams(ctx, callback, userData) + + let reqContent: ptr NodeLifecycleRequest = + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE) + let res = storage_context.sendRequestToStorageThread( + ctx, RequestType.LIFECYCLE, reqContent, callback, userData + ) + + return callback.okOrError(res, userData) + +proc storage_set_event_callback( + ctx: ptr StorageContext, callback: StorageCallback, userData: pointer +) {.dynlib, exportc.} = + initializeLibrary() + ctx[].eventCallback = cast[pointer](callback) + ctx[].eventUserData = userData diff --git a/library/storage_context.nim b/library/storage_context.nim new file mode 100644 index 00000000..260b6a98 --- /dev/null +++ b/library/storage_context.nim @@ -0,0 +1,227 @@ +## This file defines the Logos Storage context and its thread flow: +## 1. Client enqueues a request and signals the Logos Storage thread. +## 2. The Logos Storage thread dequeues the request and sends an ack (reqReceivedSignal). +## 3. The Logos Storage thread executes the request asynchronously. +## 4. On completion, the Logos Storage thread invokes the client callback with the result and userData. + +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} +{.passc: "-fPIC".} + +import std/[options, locks, atomics] +import chronicles +import chronos +import chronos/threadsync +import taskpools/channels_spsc_single +import ./ffi_types +import ./storage_thread_requests/[storage_thread_request] + +from ../codex/codex import CodexServer + +logScope: + topics = "libstorage" + +type StorageContext* = object + thread: Thread[(ptr StorageContext)] + + # This lock is only necessary while we use a SP Channel and while the signalling + # between threads assumes that there aren't concurrent requests. + # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive + # requests concurrently and spare us the need of locks + lock: Lock + + # Channel to send requests to the Logos Storage thread. + # Requests will be popped from this channel. + reqChannel: ChannelSPSCSingle[ptr StorageThreadRequest] + + # To notify the Logos Storage thread that a request is ready + reqSignal: ThreadSignalPtr + + # To notify the client thread that the request was received. + # It is acknowledgment signal (handshake). 
+ reqReceivedSignal: ThreadSignalPtr + + # Custom state attached by the client to a request, + # returned when its callback is invoked + userData*: pointer + + # Function called by the library to notify the client of global events + eventCallback*: pointer + + # Custom state attached by the client to the context, + # returned with every event callback + eventUserData*: pointer + + # Set to false to stop the Logos Storage thread (during storage_destroy) + running: Atomic[bool] + +template callEventCallback(ctx: ptr StorageContext, eventName: string, body: untyped) = + ## Template used to notify the client of global events + ## Example: onConnectionChanged, onProofMissing, etc. + if isNil(ctx[].eventCallback): + error eventName & " - eventCallback is nil" + return + + foreignThreadGc: + try: + let event = body + cast[StorageCallback](ctx[].eventCallback)( + RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData + ) + except CatchableError: + let msg = + "Exception " & eventName & " when calling 'eventCallBack': " & + getCurrentExceptionMsg() + cast[StorageCallback](ctx[].eventCallback)( + RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData + ) + +proc sendRequestToStorageThread*( + ctx: ptr StorageContext, + reqType: RequestType, + reqContent: pointer, + callback: StorageCallback, + userData: pointer, + timeout = InfiniteDuration, +): Result[void, string] = + ctx.lock.acquire() + + defer: + ctx.lock.release() + + let req = StorageThreadRequest.createShared(reqType, reqContent, callback, userData) + + # Send the request to the Logos Storage thread + let sentOk = ctx.reqChannel.trySend(req) + if not sentOk: + deallocShared(req) + return err("Failed to send request to the Logos Storage thread: " & $req[]) + + # Notify the Logos Storage thread that a request is available + let fireSyncRes = ctx.reqSignal.fireSync() + if fireSyncRes.isErr(): + deallocShared(req) + return err( + "Failed to send request to the Logos Storage thread: unable to fireSync: " & + $fireSyncRes.error + ) + + if fireSyncRes.get() == false: + deallocShared(req) + return + err("Failed to send request to the Logos Storage thread: fireSync timed out.") + + # Wait until the Logos Storage thread properly received the request + let res = ctx.reqReceivedSignal.waitSync(timeout) + if res.isErr(): + deallocShared(req) + return err( + "Failed to send request to the Logos Storage thread: unable to receive reqReceivedSignal signal." + ) + + ## Notice that in case of "ok", the deallocShared(req) is performed by the Logos Storage thread in the + ## process proc. See the 'storage_thread_request.nim' module for more details. + ok() + +proc runStorage(ctx: ptr StorageContext) {.async: (raises: []).} = + var storage: CodexServer + + while true: + try: + # Wait until a request is available + await ctx.reqSignal.wait() + except Exception as e: + error "Failure in run Logos Storage thread while waiting for reqSignal.", + error = e.msg + continue + + # If storage_destroy was called, exit the loop + if ctx.running.load == false: + break + + var request: ptr StorageThreadRequest + + # Pop a request from the channel + let recvOk = ctx.reqChannel.tryRecv(request) + if not recvOk: + error "Failure in run Storage: unable to receive request in Logos Storage thread." 
+ continue + + # yield immediately to the event loop + # with asyncSpawn only, the code will be executed + # synchronously until the first await + asyncSpawn ( + proc() {.async.} = + await sleepAsync(0) + await StorageThreadRequest.process(request, addr storage) + )() + + # Notify the main thread that we picked up the request + let fireRes = ctx.reqReceivedSignal.fireSync() + if fireRes.isErr(): + error "Failure in run Storage: unable to fire back to requester thread.", + error = fireRes.error + +proc run(ctx: ptr StorageContext) {.thread.} = + waitFor runStorage(ctx) + +proc createStorageContext*(): Result[ptr StorageContext, string] = + ## This proc is called from the main thread and it creates + ## the Logos Storage working thread. + + # Allocates a StorageContext in shared memory (for the main thread) + var ctx = createShared(StorageContext, 1) + + # This signal is used by the main side to wake the Logos Storage thread + # when a new request is enqueued. + ctx.reqSignal = ThreadSignalPtr.new().valueOr: + return + err("Failed to create a context: unable to create reqSignal ThreadSignalPtr.") + + # Used to let the caller know that the Logos Storage thread has + # acknowledged / picked up a request (like a handshake). + ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: + return err( + "Failed to create Logos Storage context: unable to create reqReceivedSignal ThreadSignalPtr." + ) + + # Protects shared state inside StorageContext + ctx.lock.initLock() + + # Logos Storage thread will loop until storage_destroy is called + ctx.running.store(true) + + try: + createThread(ctx.thread, run, ctx) + except ValueError, ResourceExhaustedError: + freeShared(ctx) + return err( + "Failed to create Logos Storage context: unable to create thread: " & + getCurrentExceptionMsg() + ) + + return ok(ctx) + +proc destroyStorageContext*(ctx: ptr StorageContext): Result[void, string] = + # Signal the Logos Storage thread to stop + ctx.running.store(false) + + # Wake the worker up if it's waiting + let signaledOnTime = ctx.reqSignal.fireSync().valueOr: + return err("Failed to destroy Logos Storage context: " & $error) + + if not signaledOnTime: + return err( + "Failed to destroy Logos Storage context: unable to get signal reqSignal on time in destroyStorageContext." + ) + + # Wait for the thread to finish + joinThread(ctx.thread) + + # Clean up + ctx.lock.deinitLock() + ?ctx.reqSignal.close() + ?ctx.reqReceivedSignal.close() + freeShared(ctx) + + return ok() diff --git a/library/storage_thread_requests/requests/node_debug_request.nim b/library/storage_thread_requests/requests/node_debug_request.nim new file mode 100644 index 00000000..405bce5e --- /dev/null +++ b/library/storage_thread_requests/requests/node_debug_request.nim @@ -0,0 +1,126 @@ +{.push raises: [].} + +## This file contains the debug info available with Logos Storage. +## The DEBUG type will return info about the P2P node. +## The PEER type is available only with storage_enable_api_debug_peers flag. +## It will return info about a specific peer if available. 
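+##
+## A hedged sketch of the DEBUG payload, with fields taken from getDebug
+## below: {"id": ..., "addrs": [...], "spr": ..., "announceAddresses": [...],
+## "table": {...}}.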
+ +import std/[options] +import chronos +import chronicles +import codexdht/discv5/spr +import ../../alloc +import ../../../codex/conf +import ../../../codex/rest/json +import ../../../codex/node + +from ../../../codex/codex import CodexServer, node + +logScope: + topics = "libstorage libstoragedebug" + +type NodeDebugMsgType* = enum + DEBUG + PEER + LOG_LEVEL + +type NodeDebugRequest* = object + operation: NodeDebugMsgType + peerId: cstring + logLevel: cstring + +proc createShared*( + T: type NodeDebugRequest, + op: NodeDebugMsgType, + peerId: cstring = "", + logLevel: cstring = "", +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].peerId = peerId.alloc() + ret[].logLevel = logLevel.alloc() + return ret + +proc destroyShared(self: ptr NodeDebugRequest) = + deallocShared(self[].peerId) + deallocShared(self[].logLevel) + deallocShared(self) + +proc getDebug( + storage: ptr CodexServer +): Future[Result[string, string]] {.async: (raises: []).} = + let node = storage[].node + let table = RestRoutingTable.init(node.discovery.protocol.routingTable) + + let json = + %*{ + "id": $node.switch.peerInfo.peerId, + "addrs": node.switch.peerInfo.addrs.mapIt($it), + "spr": + if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "", + "announceAddresses": node.discovery.announceAddrs, + "table": table, + } + + return ok($json) + +proc getPeer( + storage: ptr CodexServer, peerId: cstring +): Future[Result[string, string]] {.async: (raises: []).} = + when storage_enable_api_debug_peers: + let node = storage[].node + let res = PeerId.init($peerId) + if res.isErr: + return err("Failed to get peer: invalid peer ID " & $peerId & ": " & $res.error()) + + let id = res.get() + + try: + let peerRecord = await node.findPeer(id) + if peerRecord.isNone: + return err("Failed to get peer: peer not found") + + return ok($ %RestPeerRecord.init(peerRecord.get())) + except CancelledError: + return err("Failed to get peer: operation cancelled") + except CatchableError as e: + return err("Failed to get peer: " & e.msg) + else: + return err("Failed to get peer: peer debug API is disabled") + +proc updateLogLevel( + storage: ptr CodexServer, logLevel: cstring +): Future[Result[string, string]] {.async: (raises: []).} = + try: + {.gcsafe.}: + updateLogLevel($logLevel) + except ValueError as err: + return err("Failed to update log level: invalid value for log level: " & err.msg) + + return ok("") + +proc process*( + self: ptr NodeDebugRequest, storage: ptr CodexServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of NodeDebugMsgType.DEBUG: + let res = (await getDebug(storage)) + if res.isErr: + error "Failed to get DEBUG.", error = res.error + return err($res.error) + return res + of NodeDebugMsgType.PEER: + let res = (await getPeer(storage, self.peerId)) + if res.isErr: + error "Failed to get PEER.", error = res.error + return err($res.error) + return res + of NodeDebugMsgType.LOG_LEVEL: + let res = (await updateLogLevel(storage, self.logLevel)) + if res.isErr: + error "Failed to update LOG_LEVEL.", error = res.error + return err($res.error) + return res diff --git a/library/storage_thread_requests/requests/node_download_request.nim b/library/storage_thread_requests/requests/node_download_request.nim new file mode 100644 index 00000000..1d8ef59c --- /dev/null +++ b/library/storage_thread_requests/requests/node_download_request.nim @@ -0,0 +1,338 @@ +{.push raises: [].} + +## This file contains the download 
request.
+## A session is created for each download identified by the CID,
+## allowing the client to resume, pause and cancel the download (using chunks).
+##
+## There are two ways to download a file:
+## 1. Via chunks: the cid parameter is the CID of the file to download. Steps are:
+##    - INIT: initializes the download session
+##    - CHUNK: downloads the next chunk of the file
+##    - CANCEL: cancels the download session
+## 2. Via stream:
+##    - INIT: initializes the download session
+##    - STREAM: downloads the file in a streaming manner, calling
+##      the onChunk handler for each chunk and/or writing to a file if filepath is set.
+##    - CANCEL: cancels the download session
+
+import std/[options, streams, tables]
+import chronos
+import chronicles
+import libp2p/stream/[lpstream]
+import serde/json as serde
+import ../../alloc
+import ../../../codex/units
+import ../../../codex/codextypes
+
+from ../../../codex/codex import CodexServer, node
+from ../../../codex/node import retrieve, fetchManifest
+from ../../../codex/rest/json import `%`, RestContent
+from libp2p import Cid, init, `$`
+
+logScope:
+  topics = "libstorage libstoragedownload"
+
+type NodeDownloadMsgType* = enum
+  INIT
+  CHUNK
+  STREAM
+  CANCEL
+  MANIFEST
+
+type OnChunkHandler = proc(bytes: seq[byte]): void {.gcsafe, raises: [].}
+
+type NodeDownloadRequest* = object
+  operation: NodeDownloadMsgType
+  cid: cstring
+  chunkSize: csize_t
+  local: bool
+  filepath: cstring
+
+type
+  DownloadSessionId* = string
+  DownloadSessionCount* = int
+  DownloadSession* = object
+    stream: LPStream
+    chunkSize: int
+
+var downloadSessions {.threadvar.}: Table[DownloadSessionId, DownloadSession]
+
+proc createShared*(
+    T: type NodeDownloadRequest,
+    op: NodeDownloadMsgType,
+    cid: cstring = "",
+    chunkSize: csize_t = 0,
+    local: bool = false,
+    filepath: cstring = "",
+): ptr type T =
+  var ret = createShared(T)
+  ret[].operation = op
+  ret[].cid = cid.alloc()
+  ret[].chunkSize = chunkSize
+  ret[].local = local
+  ret[].filepath = filepath.alloc()
+
+  return ret
+
+proc destroyShared(self: ptr NodeDownloadRequest) =
+  deallocShared(self[].cid)
+  deallocShared(self[].filepath)
+  deallocShared(self)
+
+proc init(
+    storage: ptr CodexServer, cCid: cstring = "", chunkSize: csize_t = 0, local: bool
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Init a new session to download the file identified by cid.
+  ##
+  ## If the session already exists, do nothing and return ok;
+  ## this means a cid can only have one active download session.
+  ## If the chunkSize is 0, the default block size will be used.
+  ## If local is true, the file will be retrieved from the local store.
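+  ##
+  ## A hedged sketch of the chunked flow as driven from the C API (entry
+  ## points from libstorage.h): storage_download_init, then repeated
+  ## storage_download_chunk calls until the callback delivers an empty
+  ## message (EOF), with storage_download_cancel available to abort early.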
+
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to init the download: cannot parse cid: " & $cCid)
+
+  if downloadSessions.contains($cid):
+    return ok("Download session already exists.")
+
+  let node = storage[].node
+  var stream: LPStream
+
+  try:
+    let res = await node.retrieve(cid.get(), local)
+    if res.isErr():
+      return err("Failed to init the download: " & res.error.msg)
+    stream = res.get()
+  except CancelledError:
+    downloadSessions.del($cid)
+    return err("Failed to init the download: download cancelled.")
+
+  let blockSize = if chunkSize.int > 0: chunkSize.int else: DefaultBlockSize.int
+  downloadSessions[$cid] = DownloadSession(stream: stream, chunkSize: blockSize)
+
+  return ok("")
+
+proc chunk(
+    storage: ptr CodexServer, cCid: cstring = "", onChunk: OnChunkHandler
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Download the next chunk of the file identified by cid.
+  ## The chunk is passed to the onChunk handler.
+  ##
+  ## If the stream is at EOF, return ok with an empty string.
+  ##
+  ## If an error is raised while reading the stream, the session is deleted
+  ## and an error is returned.
+
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to download chunk: cannot parse cid: " & $cCid)
+
+  if not downloadSessions.contains($cid):
+    return err("Failed to download chunk: no session for cid " & $cid)
+
+  var session: DownloadSession
+  try:
+    session = downloadSessions[$cid]
+  except KeyError:
+    return err("Failed to download chunk: no session for cid " & $cid)
+
+  let stream = session.stream
+  if stream.atEof:
+    return ok("")
+
+  let chunkSize = session.chunkSize
+  var buf = newSeq[byte](chunkSize)
+
+  try:
+    let read = await stream.readOnce(addr buf[0], buf.len)
+    buf.setLen(read)
+  except LPStreamError as e:
+    await stream.close()
+    downloadSessions.del($cid)
+    return err("Failed to download chunk: " & $e.msg)
+  except CancelledError:
+    await stream.close()
+    downloadSessions.del($cid)
+    return err("Failed to download chunk: download cancelled.")
+
+  if buf.len <= 0:
+    return err("Failed to download chunk: no data")
+
+  onChunk(buf)
+
+  return ok("")
+
+proc streamData(
+    storage: ptr CodexServer,
+    stream: LPStream,
+    onChunk: OnChunkHandler,
+    chunkSize: csize_t,
+    filepath: cstring,
+): Future[Result[string, string]] {.
+    async: (raises: [CancelledError, LPStreamError, IOError])
+.} =
+  let blockSize = if chunkSize.int > 0: chunkSize.int else: DefaultBlockSize.int
+  var buf = newSeq[byte](blockSize)
+  var outputStream: OutputStreamHandle
+  var filedest: string = $filepath
+
+  try:
+    if filepath != "":
+      outputStream = filedest.fileOutput()
+
+    while not stream.atEof:
+      ## Yield immediately to the event loop.
+      ## It gives pending cancel requests a chance to be processed.
+      await sleepAsync(0)
+
+      # Restore the full capacity before each read: the setLen below may
+      # have shrunk the buffer after a short read.
+      buf.setLen(blockSize)
+      let read = await stream.readOnce(addr buf[0], buf.len)
+      buf.setLen(read)
+
+      if buf.len <= 0:
+        break
+
+      onChunk(buf)
+
+      if outputStream != nil:
+        outputStream.write(buf)
+  finally:
+    # Close the output stream exactly once, whether we exit normally
+    # or through an exception.
+    if outputStream != nil:
+      outputStream.close()
+
+  return ok("")
+
+proc stream(
+    storage: ptr CodexServer,
+    cCid: cstring,
+    chunkSize: csize_t,
+    local: bool,
+    filepath: cstring,
+    onChunk: OnChunkHandler,
+): Future[Result[string, string]] {.raises: [], async: (raises: []).} =
+  ## Stream the file identified by cid, calling the onChunk handler for each chunk
+  ## and/or writing to a file if filepath is set.
+  ##
+  ## If local is true, the file will be retrieved from the local store.
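+  ##
+  ## Note: cancel is not supported while streaming, because the worker
+  ## thread stays busy in this proc until the stream completes (see the
+  ## doc comment on `cancel` below).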
+
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to stream: cannot parse cid: " & $cCid)
+
+  if not downloadSessions.contains($cid):
+    return err("Failed to stream: no session for cid " & $cid)
+
+  var session: DownloadSession
+  try:
+    session = downloadSessions[$cid]
+  except KeyError:
+    return err("Failed to stream: no session for cid " & $cid)
+
+  try:
+    let res =
+      await noCancel storage.streamData(session.stream, onChunk, chunkSize, filepath)
+    if res.isErr:
+      return err($res.error)
+  except LPStreamError as e:
+    return err("Failed to stream file: " & $e.msg)
+  except IOError as e:
+    return err("Failed to stream file: " & $e.msg)
+  finally:
+    if session.stream != nil:
+      await session.stream.close()
+    downloadSessions.del($cid)
+
+  return ok("")
+
+proc cancel(
+    storage: ptr CodexServer, cCid: cstring
+): Future[Result[string, string]] {.raises: [], async: (raises: []).} =
+  ## Cancel the download session identified by cid.
+  ## This operation is not supported when using the stream mode,
+  ## because the worker will be busy downloading the file.
+
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to cancel: cannot parse cid: " & $cCid)
+
+  if not downloadSessions.contains($cid):
+    # The session is already cancelled
+    return ok("")
+
+  var session: DownloadSession
+  try:
+    session = downloadSessions[$cid]
+  except KeyError:
+    # The session is already cancelled
+    return ok("")
+
+  let stream = session.stream
+  await stream.close()
+  downloadSessions.del($cid)
+
+  return ok("")
+
+proc manifest(
+    storage: ptr CodexServer, cCid: cstring
+): Future[Result[string, string]] {.raises: [], async: (raises: []).} =
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to fetch manifest: cannot parse cid: " & $cCid)
+
+  try:
+    let node = storage[].node
+    let manifest = await node.fetchManifest(cid.get())
+    if manifest.isErr:
+      return err("Failed to fetch manifest: " & manifest.error.msg)
+
+    return ok(serde.toJson(manifest.get()))
+  except CancelledError:
+    return err("Failed to fetch manifest: operation cancelled.")
+
+proc process*(
+    self: ptr NodeDownloadRequest, storage: ptr CodexServer, onChunk: OnChunkHandler
+): Future[Result[string, string]] {.async: (raises: []).} =
+  defer:
+    destroyShared(self)
+
+  case self.operation
+  of NodeDownloadMsgType.INIT:
+    let res = (await init(storage, self.cid, self.chunkSize, self.local))
+    if res.isErr:
+      error "Failed to INIT.", error = res.error
+      return err($res.error)
+    return res
+  of NodeDownloadMsgType.CHUNK:
+    let res = (await chunk(storage, self.cid, onChunk))
+    if res.isErr:
+      error "Failed to CHUNK.", error = res.error
+      return err($res.error)
+    return res
+  of NodeDownloadMsgType.STREAM:
+    let res = (
+      await stream(
+        storage, self.cid, self.chunkSize, self.local, self.filepath, onChunk
+      )
+    )
+    if res.isErr:
+      error "Failed to STREAM.", error = res.error
+      return err($res.error)
+    return res
+  of NodeDownloadMsgType.CANCEL:
+    let res = (await cancel(storage, self.cid))
+    if res.isErr:
+      error "Failed to CANCEL.", error = res.error
+      return err($res.error)
+    return res
+  of NodeDownloadMsgType.MANIFEST:
+    let res = (await manifest(storage, self.cid))
+    if res.isErr:
+      error "Failed to MANIFEST.", error = res.error
+      return err($res.error)
+    return res
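Taken together, the handlers above implement the chunked flow from the module's doc comment. The following is a minimal, hypothetical driver (not part of the diff), assuming this module is imported and a node has been created and started. The `gotBytes` flag is illustrative only: it detects EOF by relying on the fact that CHUNK invokes `onChunk` only when data was actually read.

```nim
import chronos, results

proc downloadByChunks(storage: ptr CodexServer, cid: cstring) {.async.} =
  var gotBytes = false
  let onChunk = proc(bytes: seq[byte]) {.gcsafe, raises: [].} =
    gotBytes = bytes.len > 0 # hand `bytes` to the embedding application here

  # INIT: open the session; a cid can have only one active session.
  let initReq = NodeDownloadRequest.createShared(NodeDownloadMsgType.INIT, cid)
  discard await initReq.process(storage, onChunk)

  # CHUNK: pull until an error occurs or no data is delivered (EOF).
  while true:
    gotBytes = false
    let chunkReq = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, cid)
    let res = await chunkReq.process(storage, onChunk)
    if res.isErr or not gotBytes:
      break

  # CANCEL: tear down the session; a no-op if it is already gone.
  let cancelReq = NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, cid)
  discard await cancelReq.process(storage, onChunk)
```

Note that each request pointer is created per call, since `process` frees the request when it completes.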
diff --git a/library/storage_thread_requests/requests/node_info_request.nim b/library/storage_thread_requests/requests/node_info_request.nim
new file mode 100644
index 00000000..cbf364e8
--- /dev/null
+++ b/library/storage_thread_requests/requests/node_info_request.nim
@@ -0,0 +1,76 @@
+## This file contains the node info request type that will be handled.
+
+import std/[options]
+import chronos
+import chronicles
+import confutils
+import codexdht/discv5/spr
+import ../../../codex/conf
+import ../../../codex/rest/json
+import ../../../codex/node
+
+from ../../../codex/codex import CodexServer, config, node
+
+logScope:
+  topics = "libstorage libstorageinfo"
+
+type NodeInfoMsgType* = enum
+  REPO
+  SPR
+  PEERID
+
+type NodeInfoRequest* = object
+  operation: NodeInfoMsgType
+
+proc createShared*(T: type NodeInfoRequest, op: NodeInfoMsgType): ptr type T =
+  var ret = createShared(T)
+  ret[].operation = op
+  return ret
+
+proc destroyShared(self: ptr NodeInfoRequest) =
+  deallocShared(self)
+
+proc getRepo(
+    storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  return ok($(storage[].config.dataDir))
+
+proc getSpr(
+    storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  let spr = storage[].node.discovery.dhtRecord
+  if spr.isNone:
+    return err("Failed to get SPR: no SPR record found.")
+
+  return ok(spr.get.toURI)
+
+proc getPeerId(
+    storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  return ok($storage[].node.switch.peerInfo.peerId)
+
+proc process*(
+    self: ptr NodeInfoRequest, storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  defer:
+    destroyShared(self)
+
+  case self.operation
+  of REPO:
+    let res = (await getRepo(storage))
+    if res.isErr:
+      error "Failed to get REPO.", error = res.error
+      return err($res.error)
+    return res
+  of SPR:
+    let res = (await getSpr(storage))
+    if res.isErr:
+      error "Failed to get SPR.", error = res.error
+      return err($res.error)
+    return res
+  of PEERID:
+    let res = (await getPeerId(storage))
+    if res.isErr:
+      error "Failed to get PEERID.", error = res.error
+      return err($res.error)
+    return res
diff --git a/library/storage_thread_requests/requests/node_lifecycle_request.nim b/library/storage_thread_requests/requests/node_lifecycle_request.nim
new file mode 100644
index 00000000..2963f795
--- /dev/null
+++ b/library/storage_thread_requests/requests/node_lifecycle_request.nim
@@ -0,0 +1,188 @@
+## This file contains the lifecycle request type that will be handled.
+## CREATE_NODE: create a new Logos Storage node with the provided config.json.
+## START_NODE: start the provided Logos Storage node.
+## STOP_NODE: stop the provided Logos Storage node.
+## CLOSE_NODE: close the provided Logos Storage node.
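Before the implementation, a brief usage sketch (not part of the diff): the CREATE_NODE payload is a JSON document whose keys mirror the CLI/TOML option names with the `--` prefix removed, as the nix README later in this diff describes. The `data-dir` value below is a hypothetical example.

```nim
import chronos, results

proc bootNode(storage: ptr CodexServer) {.async.} =
  # CREATE_NODE: configure the node from a JSON payload.
  let configJson: cstring = """{ "data-dir": "/tmp/storage-demo" }"""
  let createReq =
    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE_NODE, configJson)
  if (await createReq.process(storage)).isErr:
    return

  # START_NODE: bring the freshly created node online.
  let startReq = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE)
  discard await startReq.process(storage)
```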
+
+import std/[options, json, strutils, net, os]
+import codexdht/discv5/spr
+import stew/shims/parseutils
+import contractabi/address
+import chronos
+import chronicles
+import results
+import confutils
+import confutils/std/net
+import confutils/defs
+import libp2p
+import json_serialization
+import json_serialization/std/[options, net]
+import ../../alloc
+import ../../../codex/conf
+import ../../../codex/utils
+import ../../../codex/utils/[keyutils, fileutils]
+import ../../../codex/units
+
+from ../../../codex/codex import CodexServer, new, start, stop, close
+
+logScope:
+  topics = "libstorage libstoragelifecycle"
+
+type NodeLifecycleMsgType* = enum
+  CREATE_NODE
+  START_NODE
+  STOP_NODE
+  CLOSE_NODE
+
+proc readValue*[T: InputFile | InputDir | OutPath | OutDir | OutFile](
+    r: var JsonReader, val: var T
+) =
+  val = T(r.readValue(string))
+
+proc readValue*(r: var JsonReader, val: var MultiAddress) =
+  val = MultiAddress.init(r.readValue(string)).get()
+
+proc readValue*(r: var JsonReader, val: var NatConfig) =
+  let res = NatConfig.parse(r.readValue(string))
+  if res.isErr:
+    raise
+      newException(SerializationError, "Cannot parse the NAT config: " & res.error())
+  val = res.get()
+
+proc readValue*(r: var JsonReader, val: var SignedPeerRecord) =
+  let res = SignedPeerRecord.parse(r.readValue(string))
+  if res.isErr:
+    raise
+      newException(SerializationError, "Cannot parse the signed peer: " & res.error())
+  val = res.get()
+
+proc readValue*(r: var JsonReader, val: var ThreadCount) =
+  val = ThreadCount(r.readValue(int))
+
+proc readValue*(r: var JsonReader, val: var NBytes) =
+  val = NBytes(r.readValue(int))
+
+proc readValue*(r: var JsonReader, val: var Duration) =
+  var dur: Duration
+  let input = r.readValue(string)
+  let count = parseDuration(input, dur)
+  if count == 0:
+    raise newException(SerializationError, "Cannot parse the duration: " & input)
+  val = dur
+
+proc readValue*(r: var JsonReader, val: var EthAddress) =
+  val = EthAddress.init(r.readValue(string)).get()
+
+type NodeLifecycleRequest* = object
+  operation: NodeLifecycleMsgType
+  configJson: cstring
+
+proc createShared*(
+    T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configJson: cstring = ""
+): ptr type T =
+  var ret = createShared(T)
+  ret[].operation = op
+  ret[].configJson = configJson.alloc()
+  return ret
+
+proc destroyShared(self: ptr NodeLifecycleRequest) =
+  deallocShared(self[].configJson)
+  deallocShared(self)
+
+proc createStorage(
+    configJson: cstring
+): Future[Result[CodexServer, string]] {.async: (raises: []).} =
+  var conf: CodexConf
+
+  try:
+    conf = CodexConf.load(
+      version = codexFullVersion,
+      envVarsPrefix = "storage",
+      cmdLine = @[],
+      secondarySources = proc(
+          config: CodexConf, sources: auto
+      ) {.gcsafe, raises: [ConfigurationError].} =
+        if configJson.len > 0:
+          sources.addConfigFileContent(Json, $(configJson))
+      ,
+    )
+  except ConfigurationError as e:
+    return err("Failed to create Storage: unable to load configuration: " & e.msg)
+
+  conf.setupLogging()
+
+  try:
+    {.gcsafe.}:
+      updateLogLevel(conf.logLevel)
+  except ValueError as e:
+    return err("Failed to create Storage: invalid value for log level: " & e.msg)
+
+  conf.setupMetrics()
+
+  if not (checkAndCreateDataDir((conf.dataDir).string)):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    return err(
+      "Failed to create Storage: unable to access/create data folder or data folder's permissions are insecure."
+    )
+
+  if not (checkAndCreateDataDir((conf.dataDir / "repo"))):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    return err(
+      "Failed to create Storage: unable to access/create data folder or data folder's permissions are insecure."
+    )
+
+  let keyPath =
+    if isAbsolute(conf.netPrivKeyFile):
+      conf.netPrivKeyFile
+    else:
+      conf.dataDir / conf.netPrivKeyFile
+  let privateKey = setupKey(keyPath)
+  if privateKey.isErr:
+    return err("Failed to create Storage: unable to get the private key.")
+  let pk = privateKey.get()
+
+  conf.apiBindAddress = string.none
+
+  let server =
+    try:
+      CodexServer.new(conf, pk)
+    except Exception as exc:
+      return err("Failed to create Storage: " & exc.msg)
+
+  return ok(server)
+
+proc process*(
+    self: ptr NodeLifecycleRequest, storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  defer:
+    destroyShared(self)
+
+  case self.operation
+  of CREATE_NODE:
+    storage[] = (await createStorage(self.configJson)).valueOr:
+      error "Failed to CREATE_NODE.", error = error
+      return err($error)
+  of START_NODE:
+    try:
+      await storage[].start()
+    except Exception as e:
+      error "Failed to START_NODE.", error = e.msg
+      return err(e.msg)
+  of STOP_NODE:
+    try:
+      await storage[].stop()
+    except Exception as e:
+      error "Failed to STOP_NODE.", error = e.msg
+      return err(e.msg)
+  of CLOSE_NODE:
+    try:
+      await storage[].close()
+    except Exception as e:
+      error "Failed to CLOSE_NODE.", error = e.msg
+      return err(e.msg)
+  return ok("")
diff --git a/library/storage_thread_requests/requests/node_p2p_request.nim b/library/storage_thread_requests/requests/node_p2p_request.nim
new file mode 100644
index 00000000..dd06eb39
--- /dev/null
+++ b/library/storage_thread_requests/requests/node_p2p_request.nim
@@ -0,0 +1,95 @@
+{.push raises: [].}
+
+## This file contains the P2P request type that will be handled.
+## CONNECT: connect to a peer with the provided peer ID and optional addresses.
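As a quick orientation before the implementation, a hypothetical CONNECT call might look as follows (the peer ID is a truncated placeholder, not a real identity). When `peerAddresses` is empty, the handler below falls back to a DHT lookup via `findPeer`.

```nim
import chronos, results

proc dialPeer(storage: ptr CodexServer) {.async.} =
  let req = NodeP2PRequest.createShared(
    NodeP2PMsgType.CONNECT,
    peerId = "16Uiu2HAm...".cstring, # hypothetical, truncated peer ID
    peerAddresses = @["/ip4/127.0.0.1/tcp/8070".cstring], # optional
  )
  let res = await req.process(storage)
  if res.isErr:
    echo "connect failed: ", res.error
```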
+
+import std/[options, sequtils]
+import chronos
+import chronicles
+import libp2p
+import ../../alloc
+import ../../../codex/node
+
+from ../../../codex/codex import CodexServer, node
+
+logScope:
+  topics = "libstorage libstoragep2p"
+
+type NodeP2PMsgType* = enum
+  CONNECT
+
+type NodeP2PRequest* = object
+  operation: NodeP2PMsgType
+  peerId: cstring
+  peerAddresses: seq[cstring]
+
+proc createShared*(
+    T: type NodeP2PRequest,
+    op: NodeP2PMsgType,
+    peerId: cstring = "",
+    peerAddresses: seq[cstring] = @[],
+): ptr type T =
+  var ret = createShared(T)
+  ret[].operation = op
+  ret[].peerId = peerId.alloc()
+  ret[].peerAddresses = peerAddresses
+  return ret
+
+proc destroyShared(self: ptr NodeP2PRequest) =
+  deallocShared(self[].peerId)
+  deallocShared(self)
+
+proc connect(
+    storage: ptr CodexServer, peerId: cstring, peerAddresses: seq[cstring] = @[]
+): Future[Result[string, string]] {.async: (raises: []).} =
+  let node = storage[].node
+  let res = PeerId.init($peerId)
+  if res.isErr:
+    return err("Failed to connect to peer: invalid peer ID: " & $res.error())
+
+  let id = res.get()
+
+  let addresses =
+    if peerAddresses.len > 0:
+      var addrs: seq[MultiAddress]
+      for addrStr in peerAddresses:
+        let maRes = MultiAddress.init($addrStr)
+        if maRes.isOk:
+          addrs.add(maRes[])
+        else:
+          return err("Failed to connect to peer: invalid address: " & $addrStr)
+      addrs
+    else:
+      try:
+        let peerRecord = await node.findPeer(id)
+        if peerRecord.isNone:
+          return err("Failed to connect to peer: peer not found.")
+
+        peerRecord.get().addresses.mapIt(it.address)
+      except CancelledError:
+        return err("Failed to connect to peer: operation cancelled.")
+      except CatchableError as e:
+        return err("Failed to connect to peer: " & $e.msg)
+
+  try:
+    await node.connect(id, addresses)
+  except CancelledError:
+    return err("Failed to connect to peer: operation cancelled.")
+  except CatchableError as e:
+    return err("Failed to connect to peer: " & $e.msg)
+
+  return ok("")
+
+proc process*(
+    self: ptr NodeP2PRequest, storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  defer:
+    destroyShared(self)
+
+  case self.operation
+  of NodeP2PMsgType.CONNECT:
+    let res = (await connect(storage, self.peerId, self.peerAddresses))
+    if res.isErr:
+      error "Failed to CONNECT.", error = res.error
+      return err($res.error)
+    return res
diff --git a/library/storage_thread_requests/requests/node_storage_request.nim b/library/storage_thread_requests/requests/node_storage_request.nim
new file mode 100644
index 00000000..72ee14f9
--- /dev/null
+++ b/library/storage_thread_requests/requests/node_storage_request.nim
@@ -0,0 +1,180 @@
+{.push raises: [].}
+
+## This file contains the node storage request.
+## Five operations are available:
+## - LIST: list all manifests stored in the node.
+## - DELETE: delete either a single block or an entire dataset from the local node.
+## - FETCH: download a file from the network to the local node.
+## - SPACE: get the amount of space used by the local node.
+## - EXISTS: check the existence of a cid in a node (local store).
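A sketch of a typical check-then-fetch sequence against these operations (not part of the diff; it assumes a started node and this module's API):

```nim
import chronos, results

proc ensureLocal(storage: ptr CodexServer, cid: cstring) {.async.} =
  # EXISTS: returns "true" or "false" for the cid in the local store.
  let existsReq = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid)
  let exists = await existsReq.process(storage)

  if exists.isOk and exists.get() == "false":
    # FETCH: returns the manifest as JSON; the dataset itself is
    # downloaded in the background via fetchDatasetAsyncTask.
    let fetchReq = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid)
    echo (await fetchReq.process(storage)).get("<fetch failed>")
```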
+
+import std/[options]
+import chronos
+import chronicles
+import libp2p/stream/[lpstream]
+import serde/json as serde
+import ../../alloc
+import ../../../codex/units
+import ../../../codex/manifest
+import ../../../codex/stores/repostore
+
+from ../../../codex/codex import CodexServer, node, repoStore
+from ../../../codex/node import
+  iterateManifests, fetchManifest, fetchDatasetAsyncTask, delete, hasLocalBlock
+from libp2p import Cid, init, `$`
+
+logScope:
+  topics = "libstorage libstoragestorage"
+
+type NodeStorageMsgType* = enum
+  LIST
+  DELETE
+  FETCH
+  SPACE
+  EXISTS
+
+type NodeStorageRequest* = object
+  operation: NodeStorageMsgType
+  cid: cstring
+
+type StorageSpace = object
+  totalBlocks* {.serialize.}: Natural
+  quotaMaxBytes* {.serialize.}: NBytes
+  quotaUsedBytes* {.serialize.}: NBytes
+  quotaReservedBytes* {.serialize.}: NBytes
+
+proc createShared*(
+    T: type NodeStorageRequest, op: NodeStorageMsgType, cid: cstring = ""
+): ptr type T =
+  var ret = createShared(T)
+  ret[].operation = op
+  ret[].cid = cid.alloc()
+
+  return ret
+
+proc destroyShared(self: ptr NodeStorageRequest) =
+  deallocShared(self[].cid)
+  deallocShared(self)
+
+type ManifestWithCid = object
+  cid {.serialize.}: string
+  manifest {.serialize.}: Manifest
+
+proc list(
+    storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  var manifests = newSeq[ManifestWithCid]()
+  proc onManifest(cid: Cid, manifest: Manifest) {.raises: [], gcsafe.} =
+    manifests.add(ManifestWithCid(cid: $cid, manifest: manifest))
+
+  try:
+    let node = storage[].node
+    await node.iterateManifests(onManifest)
+  except CancelledError:
+    return err("Failed to list manifests: cancelled operation.")
+  except CatchableError as e:
+    return err("Failed to list manifests: " & e.msg)
+
+  return ok(serde.toJson(manifests))
+
+proc delete(
+    storage: ptr CodexServer, cCid: cstring
+): Future[Result[string, string]] {.async: (raises: []).} =
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to delete the data: cannot parse cid: " & $cCid)
+
+  let node = storage[].node
+  try:
+    let res = await node.delete(cid.get())
+    if res.isErr:
+      return err("Failed to delete the data: " & res.error.msg)
+  except CancelledError:
+    return err("Failed to delete the data: cancelled operation.")
+  except CatchableError as e:
+    return err("Failed to delete the data: " & e.msg)
+
+  return ok("")
+
+proc fetch(
+    storage: ptr CodexServer, cCid: cstring
+): Future[Result[string, string]] {.async: (raises: []).} =
+  let cid = Cid.init($cCid)
+  if cid.isErr:
+    return err("Failed to fetch the data: cannot parse cid: " & $cCid)
+
+  try:
+    let node = storage[].node
+    let manifest = await node.fetchManifest(cid.get())
+    if manifest.isErr:
+      return err("Failed to fetch the data: " & manifest.error.msg)
+
+    node.fetchDatasetAsyncTask(manifest.get())
+
+    return ok(serde.toJson(manifest.get()))
+  except CancelledError:
+    return err("Failed to fetch the data: download cancelled.")
+
+proc space(
+    storage: ptr CodexServer
+): Future[Result[string, string]] {.async: (raises: []).} =
+  let repoStore = storage[].repoStore
+  let space = StorageSpace(
+    totalBlocks: repoStore.totalBlocks,
+    quotaMaxBytes: repoStore.quotaMaxBytes,
+    quotaUsedBytes: repoStore.quotaUsedBytes,
+    quotaReservedBytes: repoStore.quotaReservedBytes,
+  )
+  return ok(serde.toJson(space))
+
+proc exists(
+    storage: ptr CodexServer, cCid: cstring
+): Future[Result[string, string]] {.async: (raises: []).} =
+  let cid = Cid.init($cCid)
+  if cid.isErr:
return err("Failed to check the data existence: cannot parse cid: " & $cCid) + + try: + let node = storage[].node + let exists = await node.hasLocalBlock(cid.get()) + return ok($exists) + except CancelledError: + return err("Failed to check the data existence: operation cancelled.") + +proc process*( + self: ptr NodeStorageRequest, storage: ptr CodexServer +): Future[Result[string, string]] {.async: (raises: []).} = + defer: + destroyShared(self) + + case self.operation + of NodeStorageMsgType.LIST: + let res = (await list(storage)) + if res.isErr: + error "Failed to LIST.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.DELETE: + let res = (await delete(storage, self.cid)) + if res.isErr: + error "Failed to DELETE.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.FETCH: + let res = (await fetch(storage, self.cid)) + if res.isErr: + error "Failed to FETCH.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.SPACE: + let res = (await space(storage)) + if res.isErr: + error "Failed to SPACE.", error = res.error + return err($res.error) + return res + of NodeStorageMsgType.EXISTS: + let res = (await exists(storage, self.cid)) + if res.isErr: + error "Failed to EXISTS.", error = res.error + return err($res.error) + return res diff --git a/library/storage_thread_requests/requests/node_upload_request.nim b/library/storage_thread_requests/requests/node_upload_request.nim new file mode 100644 index 00000000..17ef1e8e --- /dev/null +++ b/library/storage_thread_requests/requests/node_upload_request.nim @@ -0,0 +1,372 @@ +{.push raises: [].} + +## This file contains the upload request. +## A session is created for each upload allowing to resume, +## pause and cancel uploads (using chunks). +## +## There are two ways to upload a file: +## 1. Via chunks: the filepath parameter is the data filename. Steps are: +## - INIT: creates a new upload session and returns its ID. +## - CHUNK: sends a chunk of data to the upload session. +## - FINALIZE: finalizes the upload and returns the CID of the uploaded file. +## - CANCEL: cancels the upload session. +## +## 2. Directly from a file path: the filepath has to be absolute. +## - INIT: creates a new upload session and returns its ID +## - FILE: starts the upload and returns the CID of the uploaded file +## - CANCEL: cancels the upload session. 
+
+import std/[options, os, mimetypes, tables]
+import chronos
+import chronicles
+import questionable
+import questionable/results
+import faststreams/inputs
+import libp2p/stream/[bufferstream, lpstream]
+import ../../alloc
+import ../../../codex/units
+import ../../../codex/codextypes
+
+from ../../../codex/codex import CodexServer, node
+from ../../../codex/node import store
+from libp2p import Cid, `$`
+
+logScope:
+  topics = "libstorage libstorageupload"
+
+type NodeUploadMsgType* = enum
+  INIT
+  CHUNK
+  FINALIZE
+  CANCEL
+  FILE
+
+type OnProgressHandler = proc(bytes: int): void {.gcsafe, raises: [].}
+
+type NodeUploadRequest* = object
+  operation: NodeUploadMsgType
+  sessionId: cstring
+  filepath: cstring
+  chunk: seq[byte]
+  chunkSize: csize_t
+
+type
+  UploadSessionId* = string
+  UploadSessionCount* = int
+  UploadSession* = object
+    stream: BufferStream
+    fut: Future[?!Cid]
+    filepath: string
+    chunkSize: int
+    onProgress: OnProgressHandler
+
+var uploadSessions {.threadvar.}: Table[UploadSessionId, UploadSession]
+var nextUploadSessionCount {.threadvar.}: UploadSessionCount
+
+proc createShared*(
+    T: type NodeUploadRequest,
+    op: NodeUploadMsgType,
+    sessionId: cstring = "",
+    filepath: cstring = "",
+    chunk: seq[byte] = @[],
+    chunkSize: csize_t = 0,
+): ptr type T =
+  var ret = createShared(T)
+  ret[].operation = op
+  ret[].sessionId = sessionId.alloc()
+  ret[].filepath = filepath.alloc()
+  ret[].chunk = chunk
+  ret[].chunkSize = chunkSize
+
+  return ret
+
+proc destroyShared(self: ptr NodeUploadRequest) =
+  deallocShared(self[].filepath)
+  deallocShared(self[].sessionId)
+  deallocShared(self)
+
+proc init(
+    storage: ptr CodexServer, filepath: cstring = "", chunkSize: csize_t = 0
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Init a new upload session and return its ID.
+  ## The session contains the future corresponding to the
+  ## `node.store` call.
+  ## The filepath can be:
+  ## - the filename when uploading via chunks
+  ## - the absolute path to a file when uploading directly.
+  ## The mimetype is deduced from the filename extension.
+  ##
+  ## By default, the chunkSize matches the block size used to store the file.
+  ##
+  ## A callback `onBlockStored` is provided to `node.store` to
+  ## report the progress of the upload. This callback will check
+  ## that an `onProgress` handler is set in the session
+  ## and call it with the number of bytes stored each time a block
+  ## is stored.
+
+  var filenameOpt, mimetypeOpt = string.none
+
+  if isAbsolute($filepath):
+    if not fileExists($filepath):
+      return err(
+        "Failed to create an upload session, the filepath does not exist: " & $filepath
+      )
+
+  if filepath != "":
+    let (_, name, ext) = splitFile($filepath)
+
+    filenameOpt = (name & ext).some
+
+    if ext != "":
+      let extNoDot = ext[1 ..^ 1]
+      let mime = newMimetypes()
+      let mimetypeStr = mime.getMimetype(extNoDot, "")
+
+      mimetypeOpt = if mimetypeStr == "": string.none else: mimetypeStr.some
+
+  let sessionId = $nextUploadSessionCount
+  nextUploadSessionCount.inc()
+
+  let stream = BufferStream.new()
+  let lpStream = LPStream(stream)
+  let node = storage[].node
+
+  let onBlockStored = proc(chunk: seq[byte]): void {.gcsafe, raises: [].} =
+    try:
+      if uploadSessions.contains($sessionId):
+        let session = uploadSessions[$sessionId]
+        if session.onProgress != nil:
+          session.onProgress(chunk.len)
+    except KeyError:
+      error "Failed to push progress update, session not found",
+        sessionId = $sessionId
+
+  let blockSize =
+    if chunkSize.NBytes > 0.NBytes: chunkSize.NBytes else: DefaultBlockSize
+  let fut = node.store(lpStream, filenameOpt, mimetypeOpt, blockSize, onBlockStored)
+
+  uploadSessions[sessionId] = UploadSession(
+    stream: stream, fut: fut, filepath: $filepath, chunkSize: blockSize.int
+  )
+
+  return ok(sessionId)
+
+proc chunk(
+    storage: ptr CodexServer, sessionId: cstring, chunk: seq[byte]
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Upload a chunk of data to the session identified by sessionId.
+  ## The chunk is pushed to the BufferStream of the session.
+  ## If the chunk size is equal to or greater than the session chunkSize,
+  ## the `onProgress` callback is temporarily set to receive the progress
+  ## from the `onBlockStored` callback. This provides a way to report progress
+  ## precisely when a block is stored.
+  ## If the chunk size is smaller than the session chunkSize,
+  ## the `onProgress` callback is not set because the LPStream will
+  ## wait until enough data is received to form a block before storing it.
+  ## The wrapper may then report the progress itself, since the data is
+  ## in the stream even though it is not yet stored.
+
+  if not uploadSessions.contains($sessionId):
+    return err("Failed to upload the chunk, the session is not found: " & $sessionId)
+
+  var fut = newFuture[void]()
+
+  try:
+    let session = uploadSessions[$sessionId]
+
+    if chunk.len >= session.chunkSize:
+      uploadSessions[$sessionId].onProgress = proc(
+          bytes: int
+      ): void {.gcsafe, raises: [].} =
+        fut.complete()
+      await session.stream.pushData(chunk)
+    else:
+      fut = session.stream.pushData(chunk)
+
+    await fut
+
+    uploadSessions[$sessionId].onProgress = nil
+  except KeyError:
+    return err("Failed to upload the chunk, the session is not found: " & $sessionId)
+  except LPError as e:
+    return err("Failed to upload the chunk, stream error: " & $e.msg)
+  except CancelledError:
+    return err("Failed to upload the chunk, operation cancelled.")
+  except CatchableError as e:
+    return err("Failed to upload the chunk: " & $e.msg)
+  finally:
+    if not fut.finished():
+      fut.cancelSoon()
+
+  return ok("")
+
+proc finalize(
+    storage: ptr CodexServer, sessionId: cstring
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Finalize the upload session identified by sessionId.
+  ## This closes the BufferStream and waits for the `node.store` future
+  ## to complete. It returns the CID of the uploaded file.
+
+  if not uploadSessions.contains($sessionId):
+    return
+      err("Failed to finalize the upload session, session not found: " & $sessionId)
+
+  var session: UploadSession
+  try:
+    session = uploadSessions[$sessionId]
+    await session.stream.pushEof()
+
+    let res = await session.fut
+    if res.isErr:
+      return err("Failed to finalize the upload session: " & res.error().msg)
+
+    return ok($res.get())
+  except KeyError:
+    return
+      err("Failed to finalize the upload session, invalid session ID: " & $sessionId)
+  except LPStreamError as e:
+    return err("Failed to finalize the upload session, stream error: " & $e.msg)
+  except CancelledError:
+    return err("Failed to finalize the upload session, operation cancelled.")
+  except CatchableError as e:
+    return err("Failed to finalize the upload session: " & $e.msg)
+  finally:
+    if uploadSessions.contains($sessionId):
+      uploadSessions.del($sessionId)
+
+    if session.fut != nil and not session.fut.finished():
+      session.fut.cancelSoon()
+
+proc cancel(
+    storage: ptr CodexServer, sessionId: cstring
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Cancel the upload session identified by sessionId.
+  ## This cancels the `node.store` future and removes the session
+  ## from the table.
+
+  if not uploadSessions.contains($sessionId):
+    # Session not found, nothing to cancel
+    return ok("")
+
+  try:
+    let session = uploadSessions[$sessionId]
+    session.fut.cancelSoon()
+  except KeyError:
+    # Session not found, nothing to cancel
+    return ok("")
+
+  uploadSessions.del($sessionId)
+
+  return ok("")
+
+proc streamFile(
+    filepath: string, stream: BufferStream, chunkSize: int
+): Future[Result[void, string]] {.async: (raises: [CancelledError]).} =
+  ## Streams a file from the given filepath using faststreams.
+  ## fsMultiSync cannot be used with chronos because of this warning:
+  ## Warning: chronos backend uses nested calls to `waitFor` which
+  ## is not supported by chronos - it is not recommended to use it until
+  ## this has been resolved.
+  ##
+  ## Ideally when it is solved, we should use fsMultiSync or find a way to use async
+  ## file I/O with chronos, see https://github.com/status-im/nim-chronos/issues/501.
+
+  try:
+    let inputStreamHandle = filepath.fileInput()
+    let inputStream = inputStreamHandle.implicitDeref
+
+    var buf = newSeq[byte](chunkSize)
+    while inputStream.readable:
+      let read = inputStream.readIntoEx(buf)
+      if read == 0:
+        break
+      await stream.pushData(buf[0 ..< read])
+    return ok()
+  except IOError, OSError, LPStreamError:
+    let e = getCurrentException()
+    return err("Failed to stream the file: " & $e.msg)
+
+proc file(
+    storage: ptr CodexServer, sessionId: cstring, onProgress: OnProgressHandler
+): Future[Result[string, string]] {.async: (raises: []).} =
+  ## Starts the file upload for the session identified by sessionId.
+  ## Will call finalize when done and return the CID of the uploaded file.
+  ##
+  ## The onProgress callback is called with the number of bytes
+  ## to report the progress of the upload.
+
+  if not uploadSessions.contains($sessionId):
+    return err("Failed to upload the file, invalid session ID: " & $sessionId)
+
+  var session: UploadSession
+
+  try:
+    uploadSessions[$sessionId].onProgress = onProgress
+    session = uploadSessions[$sessionId]
+
+    let res = await streamFile(session.filepath, session.stream, session.chunkSize)
+    if res.isErr:
+      return err("Failed to upload the file: " & res.error)
+
+    return await storage.finalize(sessionId)
+  except KeyError:
+    return err("Failed to upload the file, the session is not found: " & $sessionId)
+  except LPStreamError, IOError:
+    let e = getCurrentException()
+    return err("Failed to upload the file: " & $e.msg)
+  except CancelledError:
+    return err("Failed to upload the file, the operation is cancelled.")
+  except CatchableError as e:
+    return err("Failed to upload the file: " & $e.msg)
+  finally:
+    if uploadSessions.contains($sessionId):
+      uploadSessions.del($sessionId)
+
+    if session.fut != nil and not session.fut.finished():
+      session.fut.cancelSoon()
+
+proc process*(
+    self: ptr NodeUploadRequest,
+    storage: ptr CodexServer,
+    onUploadProgress: OnProgressHandler = nil,
+): Future[Result[string, string]] {.async: (raises: []).} =
+  defer:
+    destroyShared(self)
+
+  case self.operation
+  of NodeUploadMsgType.INIT:
+    let res = (await init(storage, self.filepath, self.chunkSize))
+    if res.isErr:
+      error "Failed to INIT.", error = res.error
+      return err($res.error)
+    return res
+  of NodeUploadMsgType.CHUNK:
+    let res = (await chunk(storage, self.sessionId, self.chunk))
+    if res.isErr:
+      error "Failed to CHUNK.", error = res.error
+      return err($res.error)
+    return res
+  of NodeUploadMsgType.FINALIZE:
+    let res = (await finalize(storage, self.sessionId))
+    if res.isErr:
+      error "Failed to FINALIZE.", error = res.error
+      return err($res.error)
+    return res
+  of NodeUploadMsgType.CANCEL:
+    let res = (await cancel(storage, self.sessionId))
+    if res.isErr:
+      error "Failed to CANCEL.", error = res.error
+      return err($res.error)
+    return res
+  of NodeUploadMsgType.FILE:
+    let res = (await file(storage, self.sessionId, onUploadProgress))
+    if res.isErr:
+      error "Failed to FILE.", error = res.error
+      return err($res.error)
+    return res
diff --git a/library/storage_thread_requests/storage_thread_request.nim b/library/storage_thread_requests/storage_thread_request.nim
new file mode 100644
index 00000000..375bf1b8
--- /dev/null
+++ b/library/storage_thread_requests/storage_thread_request.nim
@@ -0,0 +1,130 @@
+## This file contains the base message request type that will be handled.
+## The requests are created by the main thread and processed by
+## the Logos Storage Thread.
+
+import std/json
+import results
+import chronos
+import ../ffi_types
+import ./requests/node_lifecycle_request
+import ./requests/node_info_request
+import ./requests/node_debug_request
+import ./requests/node_p2p_request
+import ./requests/node_upload_request
+import ./requests/node_download_request
+import ./requests/node_storage_request
+
+from ../../codex/codex import CodexServer
+
+type RequestType* {.pure.} = enum
+  LIFECYCLE
+  INFO
+  DEBUG
+  P2P
+  UPLOAD
+  DOWNLOAD
+  STORAGE
+
+type StorageThreadRequest* = object
+  reqType: RequestType
+
+  # Request payload
+  reqContent: pointer
+
+  # Callback to notify the client thread of the result
+  callback: StorageCallback
+
+  # Custom state attached by the client to the request,
+  # returned when its callback is invoked.
+  userData: pointer
+
+proc createShared*(
+    T: type StorageThreadRequest,
+    reqType: RequestType,
+    reqContent: pointer,
+    callback: StorageCallback,
+    userData: pointer,
+): ptr type T =
+  var ret = createShared(T)
+  ret[].reqType = reqType
+  ret[].reqContent = reqContent
+  ret[].callback = callback
+  ret[].userData = userData
+  return ret
+
+# NOTE: User callbacks are executed on the working thread.
+# They must be fast and non-blocking; otherwise this thread will be blocked
+# and no further requests can be processed.
+# We can improve this by dispatching the callbacks to a thread pool or
+# moving to a MP channel.
+# See: https://github.com/logos-storage/logos-storage-nim/pull/1322#discussion_r2340708316
+proc handleRes[T: string | void | seq[byte]](
+    res: Result[T, string], request: ptr StorageThreadRequest
+) =
+  ## Handles the Result responses, which can be Result[string, string],
+  ## Result[void, string] or Result[seq[byte], string].
+  defer:
+    deallocShared(request)
+
+  if res.isErr():
+    foreignThreadGc:
+      let msg = $res.error
+      if msg == "":
+        request[].callback(RET_ERR, nil, cast[csize_t](0), request[].userData)
+      else:
+        request[].callback(
+          RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
+        )
+    return
+
+  foreignThreadGc:
+    var msg: cstring = ""
+    when T is string:
+      msg = res.get().cstring()
+    request[].callback(
+      RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
+    )
+    return
+
+proc process*(
+    T: type StorageThreadRequest,
+    request: ptr StorageThreadRequest,
+    storage: ptr CodexServer,
+) {.async: (raises: []).} =
+  ## Processes the request in the Logos Storage thread.
+  ## Dispatch to the appropriate request handler based on reqType.
+  let retFut =
+    case request[].reqType
+    of LIFECYCLE:
+      cast[ptr NodeLifecycleRequest](request[].reqContent).process(storage)
+    of INFO:
+      cast[ptr NodeInfoRequest](request[].reqContent).process(storage)
+    of RequestType.DEBUG:
+      cast[ptr NodeDebugRequest](request[].reqContent).process(storage)
+    of P2P:
+      cast[ptr NodeP2PRequest](request[].reqContent).process(storage)
+    of STORAGE:
+      cast[ptr NodeStorageRequest](request[].reqContent).process(storage)
+    of DOWNLOAD:
+      let onChunk = proc(bytes: seq[byte]) =
+        if bytes.len > 0:
+          request[].callback(
+            RET_PROGRESS,
+            cast[ptr cchar](unsafeAddr bytes[0]),
+            cast[csize_t](bytes.len),
+            request[].userData,
+          )
+
+      cast[ptr NodeDownloadRequest](request[].reqContent).process(storage, onChunk)
+    of UPLOAD:
+      let onBlockReceived = proc(bytes: int) =
+        request[].callback(RET_PROGRESS, nil, cast[csize_t](bytes), request[].userData)
+
+      cast[ptr NodeUploadRequest](request[].reqContent).process(
+        storage, onBlockReceived
+      )
+
+  handleRes(await retFut, request)
+
+proc `$`*(self: StorageThreadRequest): string =
+  return $self.reqType
diff --git a/metrics/README.md b/metrics/README.md
index 411d6357..f21d74e9 100644
--- a/metrics/README.md
+++ b/metrics/README.md
@@ -1,6 +1,6 @@
-# Codex Metrics and Dashboard
+# Logos Storage Metrics and Dashboard
 
-> This readme should help you to get started with collecting and visualizing metrics exposed by the Codex process.
+> This readme should help you to get started with collecting and visualizing metrics exposed by the Logos Storage process.
 
 ## Metrics
 
@@ -12,7 +12,7 @@ Use the `--metrics-address` and `--metrics-port` flags to to adjust the address
 
 Metrics are useful to monitor the health of the process and should aid in identifying and debugging potential issues that would be hard to notice otherwise.
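As a concrete illustration of adding a collector (a sketch using the nim-metrics package the node builds on; the collector name is hypothetical and follows the prefix convention described just below):

```nim
import metrics

# Hypothetical collector; note the `codex_` prefix discussed below.
declareCounter(codex_example_requests_total,
  "Total number of example requests served")

proc handleExampleRequest() =
  # Increment once per served request; cheap as long as no labels are used.
  codex_example_requests_total.inc()
```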
-All Codex metrics should be prefixed with the `codex_` prefix to be able to differentiate from metrics exposed by other subsystems. For example libp2p generally prefixed with the `libp2p_` prefix.
+All Logos Storage metrics should be prefixed with the `codex_` prefix to be able to differentiate them from metrics exposed by other subsystems. For example, libp2p metrics are generally prefixed with the `libp2p_` prefix.
 
 Metrics can be added on an as needed basis, however, keep in mind the potential overhead they might introduce. In particular, be careful with labels as they will generate as many metrics as there are labels for a specific collector. If a metrics or a set of metrics are expensive, it is usually advisable to put them behind a compile time flag.
diff --git a/metrics/codex-grafana-dashboard.json b/metrics/codex-grafana-dashboard.json
index 3f2913e4..179b597d 100644
--- a/metrics/codex-grafana-dashboard.json
+++ b/metrics/codex-grafana-dashboard.json
@@ -101,7 +101,7 @@
           "refId": "A"
         }
       ],
-      "title": "Codex Inflight Discovery",
+      "title": "Logos Storage Inflight Discovery",
       "type": "stat"
     },
     {
@@ -1219,7 +1219,7 @@
     ]
   },
   "timezone": "",
-  "title": "Codex Dashboard",
+  "title": "Logos Storage Dashboard",
   "uid": "pgeNfj2Wz2b",
   "version": 24,
   "weekStart": ""
diff --git a/nix/README.md b/nix/README.md
index fa34ffe2..ca3cce64 100644
--- a/nix/README.md
+++ b/nix/README.md
@@ -9,7 +9,7 @@ nix develop '.?submodules=1#'
 
 ## Building
 
-To build a Codex you can use:
+To build Logos Storage you can use:
 ```sh
 nix build '.?submodules=1#default'
 ```
@@ -19,13 +19,19 @@ https://github.com/NixOS/nix/issues/4423
 
 It can be also done without even cloning the repo:
 ```sh
-nix build 'git+https://github.com/codex-storage/nim-codex?submodules=1#'
+nix build 'git+https://github.com/logos-storage/logos-storage-nim?submodules=1#'
+```
+
+To build the C bindings you can use:
+
+```sh
+nix build ".?submodules=1#libstorage"
 ```
 
 ## Running
 
 ```sh
-nix run 'git+https://github.com/codex-storage/nim-codex?submodules=1#''
+nix run 'git+https://github.com/logos-storage/logos-storage-nim?submodules=1#'
 ```
 
 ## Testing
 
@@ -34,26 +40,26 @@ nix run 'git+https://github.com/codex-storage/nim-codex?submodules=1#''
 nix flake check ".?submodules=1#"
 ```
 
-## Running Nim-Codex as a service on NixOS
+## Running Logos Storage as a service on NixOS
 
-Include nim-codex flake in your flake inputs:
+Include the logos-storage-nim flake in your flake inputs:
 ```nix
 inputs = {
   nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
-  nim-codex-flake.url = "git+https://github.com/codex-storage/nim-codex?submodules=1#";
+  logos-storage-nim-flake.url = "git+https://github.com/logos-storage/logos-storage-nim?submodules=1#";
 };
 ```
 
To configure the service, you can use the following example:
 ```nix
-services.nim-codex = {
+services.logos-storage-nim = {
   enable = true;
   settings = {
-    data-dir = "/var/lib/codex-test";
+    data-dir = "/var/lib/storage-test";
   };
 };
 ```
 
 The settings attribute set corresponds directly to the layout of the TOML configuration file
-used by nim-codex. Each option follows the same naming convention as the CLI flags, but
+used by logos-storage-nim. Each option follows the same naming convention as the CLI flags, but
 with the -- prefix removed. For more details on the TOML file structure and options,
-refer to the official documentation: [nim-codex configuration file](https://docs.codex.storage/learn/run#configuration-file).
\ No newline at end of file
+refer to the official documentation: [logos-storage-nim configuration file](https://docs.codex.storage/learn/run#configuration-file).
\ No newline at end of file
diff --git a/nix/checksums.nix b/nix/checksums.nix
index d79345d2..c9c9f3d4 100644
--- a/nix/checksums.nix
+++ b/nix/checksums.nix
@@ -6,7 +6,7 @@ let
 in pkgs.fetchFromGitHub {
   owner = "nim-lang";
   repo = "checksums";
-  rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile;
+  rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\".*$" sourceFile;
   # WARNING: Requires manual updates when Nim compiler version changes.
-  hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ=";
+  hash = "sha256-JZhWqn4SrAgNw/HLzBK0rrj3WzvJ3Tv1nuDMn83KoYY=";
 }
diff --git a/nix/default.nix b/nix/default.nix
index b5823f86..55fb792d 100644
--- a/nix/default.nix
+++ b/nix/default.nix
@@ -13,7 +13,7 @@
   # Perform 2-stage bootstrap instead of 3-stage to save time.
   quickAndDirty ? true,
   circomCompatPkg ? (
-    builtins.getFlake "github:codex-storage/circom-compat-ffi"
+    builtins.getFlake "github:logos-storage/circom-compat-ffi"
   ).packages.${builtins.currentSystem}.default
 }:
 
 assert pkgs.lib.assertMsg ((src.submodules or true) == true)
   "Unable to build without submodules. Append '?submodules=1#' to the URI.";
 
 let
-  inherit (pkgs) stdenv lib writeScriptBin callPackage;
+  inherit (pkgs) lib writeScriptBin callPackage;
 
   revision = lib.substring 0 8 (src.rev or "dirty");
 
   tools = callPackage ./tools.nix {};
 
-in pkgs.gcc13Stdenv.mkDerivation rec {
-  pname = "codex";
+  # Pin GCC/Clang versions
+  stdenv = if pkgs.stdenv.isLinux then pkgs.gcc13Stdenv else pkgs.clang16Stdenv;
+
+in stdenv.mkDerivation rec {
+  pname = "storage";
 
   version = "${tools.findKeyValue "version = \"([0-9]+\.[0-9]+\.[0-9]+)\"" ../codex.nimble}-${revision}";
 
@@ -46,14 +49,16 @@
     fakeGit = writeScriptBin "git" "echo ${version}";
     # Fix for the nim-circom-compat-ffi package that is built with cargo.
     fakeCargo = writeScriptBin "cargo" "echo ${version}";
-  in
-    with pkgs; [
-      cmake
-      which
-      lsb-release
-      circomCompatPkg
-      fakeGit
-      fakeCargo
+  in with pkgs; [
+    cmake
+    which
+    circomCompatPkg
+    fakeGit
+    fakeCargo
+  ] ++ lib.optionals stdenv.isLinux [
+    lsb-release
+  ] ++ lib.optionals stdenv.isDarwin [
+    darwin.cctools
   ];
 
   # Disable CPU optimizations that make binary not portable.
@@ -67,6 +72,12 @@
     "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}"
   ];
 
+  # FIXME: Remove once permanent fix is applied to NBS:
+  patchPhase = ''
+    substituteInPlace vendor/nimbus-build-system/scripts/build_nim.sh \
+      --replace-fail '"''${NIX_BUILD_TOP}" != "/build"' '-z $${NIX_BUILD_TOP}'
+  '';
+
   configurePhase = ''
     patchShebangs . vendor/nimbus-build-system > /dev/null
     make nimbus-build-system-paths
@@ -83,14 +94,20 @@
   '';
 
   installPhase = ''
-    mkdir -p $out/bin
-    cp build/codex $out/bin/
+    if [ -f build/storage ]; then
+      mkdir -p $out/bin
+      cp build/storage $out/bin/
+    else
+      mkdir -p $out/lib $out/include
+      cp build/libstorage* $out/lib/
+      cp library/libstorage.h $out/include/
+    fi
   '';
 
   meta = with pkgs.lib; {
-    description = "Nim Codex storage system";
-    homepage = "https://github.com/codex-storage/nim-codex";
+    description = "Logos Storage system";
+    homepage = "https://github.com/logos-storage/logos-storage-nim";
     license = licenses.mit;
     platforms = stableSystems;
   };
-}
\ No newline at end of file
+}
diff --git a/nix/nimble.nix b/nix/nimble.nix
index 39c5e0ff..6eb4c8a0 100644
--- a/nix/nimble.nix
+++ b/nix/nimble.nix
@@ -9,5 +9,5 @@ in pkgs.fetchFromGitHub {
   fetchSubmodules = true;
   rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile;
   # WARNING: Requires manual updates when Nim compiler version changes.
-  hash = "sha256-Rz48sGUKZEAp+UySla+MlsOfsERekuGKw69Tm11fDz8=";
+  hash = "sha256-wgzFhModFkwB8st8F5vSkua7dITGGC2cjoDvgkRVZMs=";
 }
diff --git a/nix/service.nix b/nix/service.nix
index c22a2166..345feb9b 100644
--- a/nix/service.nix
+++ b/nix/service.nix
@@ -7,18 +7,18 @@ let
 
   toml = pkgs.formats.toml { };
 
-  cfg = config.services.nim-codex;
+  cfg = config.services.logos-storage-nim;
 in
 {
   options = {
-    services.nim-codex = {
-      enable = mkEnableOption "Nim Codex Node service.";
+    services.logos-storage-nim = {
+      enable = mkEnableOption "Logos Storage Node service.";
 
       package = mkOption {
         type = types.package;
         default = pkgs.callPackage ./default.nix { src = self; inherit circomCompatPkg; };
         defaultText = literalExpression "pkgs.codex";
-        description = mdDoc "Package to use as Nim Codex node.";
+        description = mdDoc "Package to use as the Logos Storage node.";
       };
 
       settings = mkOption {
@@ -31,10 +31,10 @@ in
   config = mkIf cfg.enable {
     environment.etc = {
-      "nim-codex/config.toml".source = toml.generate "config.toml" cfg.settings;
+      "logos-storage-nim/config.toml".source = toml.generate "config.toml" cfg.settings;
     };
-    systemd.services.nim-codex = {
-      description = "Nim Codex Node";
+    systemd.services.logos-storage-nim = {
+      description = "Logos Storage Node";
       wantedBy = [ "multi-user.target" ];
       requires = [ "network.target" ];
       serviceConfig = {
@@ -45,12 +45,12 @@ in
         NoNewPrivileges = true;
         PrivateDevices = true;
         MemoryDenyWriteExecute = true;
-        ExecStart = "${cfg.package}/bin/codex --config-file=/etc/nim-codex/config.toml";
+        ExecStart = "${cfg.package}/bin/storage --config-file=/etc/logos-storage-nim/config.toml";
         Restart = "on-failure";
       };
       restartIfChanged = true;
       restartTriggers = [
-        "/etc/nim-codex/config.toml"
+        "/etc/logos-storage-nim/config.toml"
       ];
     };
   };
diff --git a/nix/tools.nix b/nix/tools.nix
index 108d3860..47e99450 100644
--- a/nix/tools.nix
+++ b/nix/tools.nix
@@ -9,7 +9,12 @@ in {
     let
       linesFrom = file: splitString "\n" (fileContents file);
      matching = regex: lines: map (line: match regex line) lines;
-      extractMatch = matches: last (flatten (remove null matches));
+      extractMatch = matches:
+        let xs = flatten (remove null matches);
+        in if xs == [] then
+          throw "findKeyValue: no match for regex '${regex}' in ${toString sourceFile}"
+        else
+          last xs;
     in extractMatch (matching regex (linesFrom sourceFile));
 }
diff --git a/openapi.yaml b/openapi.yaml
index fab3d334..5922f2c6 100644
--- a/openapi.yaml
+++ b/openapi.yaml
@@ -2,8 +2,8 @@
 openapi: 3.0.3
 info:
   version: 0.0.1
-  title: Codex API
-  description: "List of endpoints and interfaces available to Codex API users"
+  title: Logos Storage API
+  description: "List of endpoints and interfaces available to Logos Storage API users"
 
 security:
   - {}
@@ -115,7 +115,7 @@ components:
       seen:
         type: boolean
 
-    CodexVersion:
+    StorageVersion:
       type: object
       properties:
        version:
@@ -150,7 +150,7 @@
         - spr
         - announceAddresses
         - table
-        - codex
+        - storage
       properties:
         id:
           $ref: "#/components/schemas/PeerId"
@@ -169,8 +169,8 @@
           $ref: "#/components/schemas/MultiAddress"
         table:
           $ref: "#/components/schemas/PeersTable"
-        codex:
-          $ref: "#/components/schemas/CodexVersion"
+        storage:
+          $ref: "#/components/schemas/StorageVersion"
 
     SalesAvailability:
       type: object
@@ -485,18 +485,18 @@
       quotaMaxBytes:
         type: integer
         format: int64
-        description: "Maximum storage space (in bytes) available for the node in Codex's local repository."
+        description: "Maximum storage space (in bytes) available for the node in Logos Storage's local repository."
       quotaUsedBytes:
         type: integer
         format: int64
-        description: "Amount of storage space (in bytes) currently used for storing files in Codex's local repository."
+        description: "Amount of storage space (in bytes) currently used for storing files in Logos Storage's local repository."
       quotaReservedBytes:
         type: integer
        format: int64
-        description: "Amount of storage reserved (in bytes) in the Codex's local repository for future use when storage requests will be picked up and hosted by the node using node's availabilities. This does not include the storage currently in use."
+        description: "Amount of storage reserved (in bytes) in Logos Storage's local repository for future use, when storage requests are picked up and hosted by the node using the node's availabilities. This does not include the storage currently in use."
 servers:
-  - url: "http://localhost:8080/api/codex/v1"
+  - url: "http://localhost:8080/api/storage/v1"
 
 tags:
   - name: Marketplace
diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim
index c54a1fff..6b36a630 100644
--- a/tests/codex/blockexchange/discovery/testdiscovery.nim
+++ b/tests/codex/blockexchange/discovery/testdiscovery.nim
@@ -54,7 +54,7 @@ asyncchecksuite "Block Advertising and Discovery":
     peerStore = PeerCtxStore.new()
     pendingBlocks = PendingBlocksManager.new()
 
-    (manifest, tree) = makeManifestAndTree(blocks).tryGet()
+    (_, tree, manifest) = makeDataset(blocks).tryGet()
     manifestBlock =
       bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet()
 
@@ -172,7 +172,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
         break
       blocks.add(bt.Block.new(chunk).tryGet())
 
-    let (manifest, tree) = makeManifestAndTree(blocks).tryGet()
+    let (_, tree, manifest) = makeDataset(blocks).tryGet()
     manifests.add(manifest)
     mBlocks.add(manifest.asBlock())
     trees.add(tree)
@@ -216,7 +216,6 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
   test "E2E - Should advertise and discover blocks":
     # Distribute the manifests and trees amongst 1..3
     # Ask 0 to download everything without connecting him beforehand
-
     var advertised: Table[Cid, SignedPeerRecord]
 
     MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
@@ -242,6 +241,7 @@
           blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
         )
       ],
+      allowSpurious = true,
     )
 
     discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)
@@ -252,6 +252,7 @@
           blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
        )
       ],
+      allowSpurious = true,
     )
 
     discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)
@@ -262,6 +263,7 @@
           blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
         )
       ],
+      allowSpurious = true,
     )
 
     MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
@@ -311,6 +313,7 @@
           blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
         )
       ],
+      allowSpurious = true,
     )
 
     discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)
@@ -321,6 +324,7 @@
           blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
         )
       ],
+      allowSpurious = true,
     )
 
     discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)
@@ -331,6 +335,7 @@
           blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
         )
       ],
+      allowSpurious = true,
     )
 
     MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim
index 9efab1a6..df3f8c80 100644
--- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim
+++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim
@@ -43,7 +43,7 @@ asyncchecksuite "Test Discovery Engine":
 
       blocks.add(bt.Block.new(chunk).tryGet())
 
-    (manifest, tree) = makeManifestAndTree(blocks).tryGet()
+    (_, tree, manifest) = makeDataset(blocks).tryGet()
     manifestBlock = manifest.asBlock()
     blocks.add(manifestBlock)
diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim
index 6ab345d1..7c2a9ed8 100644
--- a/tests/codex/blockexchange/engine/testblockexc.nim
+++ b/tests/codex/blockexchange/engine/testblockexc.nim
@@ -29,14 +29,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
     nodeCmps1 = generateNodes(1, blocks1).components[0]
     nodeCmps2 = generateNodes(1, blocks2).components[0]
 
-    await allFuturesThrowing(
-      nodeCmps1.switch.start(),
-      nodeCmps1.blockDiscovery.start(),
-      nodeCmps1.engine.start(),
-      nodeCmps2.switch.start(),
-      nodeCmps2.blockDiscovery.start(),
-      nodeCmps2.engine.start(),
-    )
+    await allFuturesThrowing(nodeCmps1.start(), nodeCmps2.start())
 
     # initialize our want lists
     pendingBlocks1 =
@@ -65,14 +58,7 @@
     check isNil(peerCtx2).not
 
   teardown:
-    await allFuturesThrowing(
-      nodeCmps1.blockDiscovery.stop(),
-      nodeCmps1.engine.stop(),
-      nodeCmps1.switch.stop(),
-      nodeCmps2.blockDiscovery.stop(),
-      nodeCmps2.engine.stop(),
-      nodeCmps2.switch.stop(),
-    )
+    await allFuturesThrowing(nodeCmps1.stop(), nodeCmps2.stop())
 
   test "Should exchange blocks on connect":
     await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds)
@@ -96,17 +82,11 @@
   test "Should send want-have for block":
     let blk = bt.Block.new("Block 1".toBytes).tryGet()
     let blkFut = nodeCmps1.pendingBlocks.getWantHandle(blk.cid)
+    peerCtx2.blockRequestScheduled(blk.address)
+
     (await nodeCmps2.localStore.putBlock(blk)).tryGet()
 
-    let entry = WantListEntry(
-      address: blk.address,
-      priority: 1,
-      cancel: false,
-      wantType: WantType.WantBlock,
-      sendDontHave: false,
-    )
-
-    peerCtx1.peerWants.add(entry)
+    peerCtx1.wantedBlocks.incl(blk.address)
     check nodeCmps2.engine.taskQueue.pushOrUpdateNoWait(peerCtx1).isOk
 
     check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet()
@@ -209,3 +189,38 @@
 
     check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3]
     check pendingBlocks2.mapIt(it.read) == blocks[12 .. 15]
+
+asyncchecksuite "NetworkStore - dissemination":
+  var nodes: seq[NodesComponents]
+
+  teardown:
+    if nodes.len > 0:
+      await nodes.stop()
+
+  test "Should disseminate blocks across large diameter swarm":
+    let dataset = makeDataset(await makeRandomBlocks(60 * 256, 256'nb)).tryGet()
+
+    nodes = generateNodes(
+      6,
+      config = NodeConfig(
+        useRepoStore: false,
+        findFreePorts: false,
+        basePort: 8080,
+        createFullNode: false,
+        enableBootstrap: false,
+        enableDiscovery: true,
+      ),
+    )
+
+    await assignBlocks(nodes[0], dataset, 0 .. 9)
+    await assignBlocks(nodes[1], dataset, 10 .. 19)
+    await assignBlocks(nodes[2], dataset, 20 .. 29)
+    await assignBlocks(nodes[3], dataset, 30 .. 39)
+    await assignBlocks(nodes[4], dataset, 40 .. 49)
+    await assignBlocks(nodes[5], dataset, 50 .. 59)
+
+    await nodes.start()
+    await nodes.linearTopology()
+
+    let downloads = nodes.mapIt(downloadDataset(it, dataset))
+    await allFuturesThrowing(downloads).wait(30.seconds)
diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim
index 0541c119..1afe2147 100644
--- a/tests/codex/blockexchange/engine/testengine.nim
+++ b/tests/codex/blockexchange/engine/testengine.nim
@@ -27,8 +27,6 @@ const NopSendWantCancellationsProc = proc(
 
 asyncchecksuite "NetworkStore engine basic":
   var
-    rng: Rng
-    seckey: PrivateKey
     peerId: PeerId
     chunker: Chunker
     wallet: WalletRef
@@ -39,9 +37,7 @@
     done: Future[void]
 
   setup:
-    rng = Rng.instance()
-    seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.example
     chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
@@ -83,7 +79,7 @@
     for b in blocks:
       discard engine.pendingBlocks.getWantHandle(b.cid)
 
-    await engine.setupPeer(peerId)
+    await engine.peerAddedHandler(peerId)
 
     await done.wait(100.millis)
 
@@ -111,14 +107,12 @@
     )
     engine.pricing = pricing.some
 
-    await engine.setupPeer(peerId)
+    await engine.peerAddedHandler(peerId)
 
     await done.wait(100.millis)
 
 asyncchecksuite "NetworkStore engine handlers":
   var
-    rng: Rng
-    seckey: PrivateKey
     peerId: PeerId
     chunker: Chunker
     wallet: WalletRef
@@ -134,8 +128,7 @@
     blocks: seq[Block]
 
   setup:
-    rng = Rng.instance()
-    chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb)
+    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
 
     while true:
       let chunk = await chunker.getBytes()
@@ -144,8 +137,7 @@
 
       blocks.add(Block.new(chunk).tryGet())
 
-    seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.example
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
     peerStore = PeerCtxStore.new()
@@ -174,7 +166,7 @@
     let ctx = await engine.taskQueue.pop()
     check ctx.id == peerId
     # only `wantBlock` scheduled
-    check ctx.peerWants.mapIt(it.address.cidOrTreeCid) == blocks.mapIt(it.cid)
+    check ctx.wantedBlocks == blocks.mapIt(it.address).toHashSet
 
     let done = handler()
     await engine.wantListHandler(peerId, wantList)
@@ -249,6 +241,9 @@
   test "Should store blocks in local store":
     let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
 
+    for blk in blocks:
+      peerCtx.blockRequestScheduled(blk.address)
+
     let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
 
     # Install NOP for want list cancellations so they don't cause a crash
@@ -274,6 +269,9 @@
       (it.address, Presence(address: it.address, price: rand(uint16).u256, have: true))
     ).toTable
 
+    for blk in blocks:
+      peerContext.blockRequestScheduled(blk.address)
+
     engine.network = BlockExcNetwork(
       request: BlockExcRequest(
         sendPayment: proc(
@@ -337,33 +335,44 @@
       check a in peerCtx.peerHave
       check peerCtx.blocks[a].price == price
 
-  test "Should send cancellations for received blocks":
+  test "Should send cancellations for requested blocks only":
     let
-      pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
-      blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
-      cancellations = newTable(blocks.mapIt((it.address, newFuture[void]())).toSeq)
+      pendingPeer = peerId # peer to which we have pending block requests
+      pendingPeerCtx = peerCtx
+      senderPeer = PeerId.example # peer that will actually send the blocks
+      senderPeerCtx = BlockExcPeerCtx(id: senderPeer)
+      reqBlocks = @[blocks[0], blocks[4]] # blocks that we requested from pendingPeer
+      reqBlockAddrs = reqBlocks.mapIt(it.address)
+      blockHandles = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))

-    peerCtx.blocks = blocks.mapIt(
-      (it.address, Presence(address: it.address, have: true, price: UInt256.example))
-    ).toTable
+    var cancelled: HashSet[BlockAddress]
+
+    engine.peers.add(senderPeerCtx)
+    for address in reqBlockAddrs:
+      pendingPeerCtx.blockRequestScheduled(address)
+
+    for address in blocks.mapIt(it.address):
+      senderPeerCtx.blockRequestScheduled(address)

     proc sendWantCancellations(
         id: PeerId, addresses: seq[BlockAddress]
     ) {.async: (raises: [CancelledError]).} =
+      assert id == pendingPeer
       for address in addresses:
-        cancellations[address].catch.expect("address should exist").complete()
+        cancelled.incl(address)

     engine.network = BlockExcNetwork(
       request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
     )

-    await engine.blocksDeliveryHandler(peerId, blocksDelivery)
-    discard await allFinished(pending).wait(100.millis)
-    await allFuturesThrowing(cancellations.values().toSeq)
+    let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
+    await engine.blocksDeliveryHandler(senderPeer, blocksDelivery)
+    discard await allFinished(blockHandles).wait(100.millis)
+
+    check cancelled == reqBlockAddrs.toHashSet()

 asyncchecksuite "Block Download":
   var
-    rng: Rng
     seckey: PrivateKey
     peerId: PeerId
     chunker: Chunker
@@ -380,8 +389,7 @@ asyncchecksuite "Block Download":
     blocks: seq[Block]

   setup:
-    rng = Rng.instance()
-    chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb)
+    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)

     while true:
       let chunk = await chunker.getBytes()
@@ -390,8 +398,7 @@ asyncchecksuite "Block Download":

       blocks.add(Block.new(chunk).tryGet())

-    seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.example
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
     peerStore = PeerCtxStore.new()
@@ -409,13 +416,27 @@ asyncchecksuite "Block Download":
       localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks
     )

-    peerCtx = BlockExcPeerCtx(id: peerId)
+    peerCtx = BlockExcPeerCtx(id: peerId, activityTimeout: 100.milliseconds)
     engine.peers.add(peerCtx)

-  test "Should exhaust retries":
+  test "Should reschedule blocks on peer timeout":
+    let
+      slowPeer = peerId
+      fastPeer = PeerId.example
+      slowPeerCtx = peerCtx
+      # The "fast" peer in fact has a generous timeout, which should avoid
+      # timing issues in the test.
+      fastPeerCtx = BlockExcPeerCtx(id: fastPeer, activityTimeout: 60.seconds)
+      requestedBlock = blocks[0]
+
     var
-      retries = 2
-      address = BlockAddress.init(blocks[0].cid)
+      slowPeerWantList = newFuture[void]("slowPeerWantList")
+      fastPeerWantList = newFuture[void]("fastPeerWantList")
+      slowPeerDropped = newFuture[void]("slowPeerDropped")
+      slowPeerBlockRequest = newFuture[void]("slowPeerBlockRequest")
+      fastPeerBlockRequest = newFuture[void]("fastPeerBlockRequest")
+
+    engine.peers.add(fastPeerCtx)

     proc sendWantList(
         id: PeerId,
@@ -426,68 +447,63 @@
         addresses: seq[BlockAddress],
         priority: int32 = 0,
         cancel: bool = false,
         wantType: WantType = WantType.WantHave,
         full: bool = false,
         sendDontHave: bool = false,
     ) {.async: (raises: [CancelledError]).} =
-      check wantType == WantHave
-      check not engine.pendingBlocks.isInFlight(address)
-      check engine.pendingBlocks.retries(address) == retries
-      retries -= 1
+      check addresses == @[requestedBlock.address]

-    engine.pendingBlocks.blockRetries = 2
-    engine.pendingBlocks.retryInterval = 10.millis
+      if wantType == WantBlock:
+        if id == slowPeer:
+          slowPeerBlockRequest.complete()
+        else:
+          fastPeerBlockRequest.complete()
+
+      if wantType == WantHave:
+        if id == slowPeer:
+          slowPeerWantList.complete()
+        else:
+          fastPeerWantList.complete()
+
+    proc onPeerDropped(
+        peer: PeerId
+    ): Future[void] {.async: (raises: [CancelledError]).} =
+      assert peer == slowPeer
+      slowPeerDropped.complete()
+
+    proc selectPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
+      # Looks for the slow peer.
+      for peer in peers:
+        if peer.id == slowPeer:
+          return peer
+
+      return peers[0]
+
+    engine.selectPeer = selectPeer
+    engine.pendingBlocks.retryInterval = 200.milliseconds
     engine.network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
+    engine.network.handlers.onPeerDropped = onPeerDropped

-    let pending = engine.requestBlock(address)
+    let blockHandle = engine.requestBlock(requestedBlock.address)

-    expect RetriesExhaustedError:
-      discard (await pending).tryGet()
+    # Waits for the engine to send its want list to both peers.
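+    # Expected sequence from here (sketch): both peers receive a want-have;
+    # once presence is signalled, `selectPeer` (pinned above) routes the
+    # want-block to the slow peer; the slow peer never answers, so it is
+    # dropped after its `activityTimeout` (100ms in this suite's setup) and
+    # the block is rescheduled onto the fast peer.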
+    await slowPeerWantList.wait(5.seconds)
+    await fastPeerWantList.wait(5.seconds)

-  test "Should retry block request":
-    var
-      address = BlockAddress.init(blocks[0].cid)
-      steps = newAsyncEvent()
+    let blockPresence =
+      @[BlockPresence(address: requestedBlock.address, type: BlockPresenceType.Have)]

-    proc sendWantList(
-        id: PeerId,
-        addresses: seq[BlockAddress],
-        priority: int32 = 0,
-        cancel: bool = false,
-        wantType: WantType = WantType.WantHave,
-        full: bool = false,
-        sendDontHave: bool = false,
-    ) {.async: (raises: [CancelledError]).} =
-      case wantType
-      of WantHave:
-        check engine.pendingBlocks.isInFlight(address) == false
-        check engine.pendingBlocks.retriesExhausted(address) == false
-        steps.fire()
-      of WantBlock:
-        check engine.pendingBlocks.isInFlight(address) == true
-        check engine.pendingBlocks.retriesExhausted(address) == false
-        steps.fire()
-
-    engine.pendingBlocks.blockRetries = 10
-    engine.pendingBlocks.retryInterval = 10.millis
-    engine.network = BlockExcNetwork(
-      request: BlockExcRequest(
-        sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc
-      )
-    )
-
-    let pending = engine.requestBlock(address)
-    await steps.wait()
-
-    # add blocks precense
-    peerCtx.blocks = blocks.mapIt(
-      (it.address, Presence(address: it.address, have: true, price: UInt256.example))
-    ).toTable
-
-    steps.clear()
-    await steps.wait()
+    await engine.blockPresenceHandler(slowPeer, blockPresence)
+    await engine.blockPresenceHandler(fastPeer, blockPresence)

+    # Waits for the engine to ask the slow peer for the block.
+    await slowPeerBlockRequest.wait(5.seconds)
+    # Don't reply, and wait for the slow peer to be dropped on timeout.
+    await slowPeerDropped.wait(5.seconds)
+    # The engine should retry and ask the fast peer for the block.
+    await fastPeerBlockRequest.wait(5.seconds)

     await engine.blocksDeliveryHandler(
-      peerId, @[BlockDelivery(blk: blocks[0], address: address)]
+      fastPeer, @[BlockDelivery(blk: requestedBlock, address: requestedBlock.address)]
     )
-    check (await pending).tryGet() == blocks[0]
+
+    discard await blockHandle.wait(5.seconds)

   test "Should cancel block request":
     var
@@ -522,8 +538,6 @@

 asyncchecksuite "Task Handler":
   var
-    rng: Rng
-    seckey: PrivateKey
     peerId: PeerId
     chunker: Chunker
     wallet: WalletRef
@@ -541,8 +555,7 @@ asyncchecksuite "Task Handler":
     blocks: seq[Block]

   setup:
-    rng = Rng.instance()
-    chunker = RandomChunker.new(rng, size = 1024, chunkSize = 256'nb)
+    chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256'nb)

     while true:
       let chunk = await chunker.getBytes()
       if chunk.len <= 0:
@@ -550,8 +563,7 @@ asyncchecksuite "Task Handler":

       blocks.add(Block.new(chunk).tryGet())

-    seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.example
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
     peerStore = PeerCtxStore.new()
@@ -571,138 +583,72 @@ asyncchecksuite "Task Handler":
     peersCtx = @[]

     for i in 0 .. 3:
-      let seckey = PrivateKey.random(rng[]).tryGet()
-      peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet())
-
+      peers.add(PeerId.example)
       peersCtx.add(BlockExcPeerCtx(id: peers[i]))
       peerStore.add(peersCtx[i])

     engine.pricing = Pricing.example.some

-  test "Should send want-blocks in priority order":
+  # FIXME: this is disabled for now: I've dropped block priorities to make
+  # my life easier as I try to optimize the protocol, and also because
+  # they were not being used anywhere.
+  #
+  # test "Should send want-blocks in priority order":
+  #   proc sendBlocksDelivery(
+  #       id: PeerId, blocksDelivery: seq[BlockDelivery]
+  #   ) {.async: (raises: [CancelledError]).} =
+  #     check blocksDelivery.len == 2
+  #     check:
+  #       blocksDelivery[1].address == blocks[0].address
+  #       blocksDelivery[0].address == blocks[1].address
+
+  #   for blk in blocks:
+  #     (await engine.localStore.putBlock(blk)).tryGet()
+  #   engine.network.request.sendBlocksDelivery = sendBlocksDelivery
+
+  #   # second block to send by priority
+  #   peersCtx[0].peerWants.add(
+  #     WantListEntry(
+  #       address: blocks[0].address,
+  #       priority: 49,
+  #       cancel: false,
+  #       wantType: WantType.WantBlock,
+  #       sendDontHave: false,
+  #     )
+  #   )
+
+  #   # first block to send by priority
+  #   peersCtx[0].peerWants.add(
+  #     WantListEntry(
+  #       address: blocks[1].address,
+  #       priority: 50,
+  #       cancel: false,
+  #       wantType: WantType.WantBlock,
+  #       sendDontHave: false,
+  #     )
+  #   )
+
+  #   await engine.taskHandler(peersCtx[0])
+
+  test "Should mark outgoing blocks as sent":
     proc sendBlocksDelivery(
         id: PeerId, blocksDelivery: seq[BlockDelivery]
     ) {.async: (raises: [CancelledError]).} =
-      check blocksDelivery.len == 2
-      check:
-        blocksDelivery[1].address == blocks[0].address
-        blocksDelivery[0].address == blocks[1].address
+      let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
+      check peersCtx[0].isBlockSent(blockAddress)

     for blk in blocks:
       (await engine.localStore.putBlock(blk)).tryGet()
     engine.network.request.sendBlocksDelivery = sendBlocksDelivery

-    # second block to send by priority
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: blocks[0].address,
-        priority: 49,
-        cancel: false,
-        wantType: WantType.WantBlock,
-        sendDontHave: false,
-      )
-    )
-
-    # first block to send by priority
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: blocks[1].address,
-        priority: 50,
-        cancel: false,
-        wantType: WantType.WantBlock,
-        sendDontHave: false,
-      )
-    )
+    peersCtx[0].wantedBlocks.incl(blocks[0].address)

     await engine.taskHandler(peersCtx[0])

-  test "Should set in-flight for outgoing blocks":
-    proc sendBlocksDelivery(
-        id: PeerId, blocksDelivery: seq[BlockDelivery]
-    ) {.async: (raises: [CancelledError]).} =
-      check peersCtx[0].peerWants[0].inFlight
-
-    for blk in blocks:
-      (await engine.localStore.putBlock(blk)).tryGet()
-    engine.network.request.sendBlocksDelivery = sendBlocksDelivery
-
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: blocks[0].address,
-        priority: 50,
-        cancel: false,
-        wantType: WantType.WantBlock,
-        sendDontHave: false,
-        inFlight: false,
-      )
-    )
-    await engine.taskHandler(peersCtx[0])
-
-  test "Should clear in-flight when local lookup fails":
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: blocks[0].address,
-        priority: 50,
-        cancel: false,
-        wantType: WantType.WantBlock,
-        sendDontHave: false,
-        inFlight: false,
-      )
-    )
-    await engine.taskHandler(peersCtx[0])
-
-    check not peersCtx[0].peerWants[0].inFlight
-
-  test "Should send presence":
-    let present = blocks
-    let missing = @[Block.new("missing".toBytes).tryGet()]
-    let price = (!engine.pricing).price
-
-    proc sendPresence(
-        id: PeerId, presence: seq[BlockPresence]
-    ) {.async: (raises: [CancelledError]).} =
-      check presence.mapIt(!Presence.init(it)) ==
-        @[
-          Presence(address: present[0].address, have: true, price: price),
-          Presence(address: present[1].address, have: true, price: price),
-          Presence(address: missing[0].address, have: false),
-        ]
-
-    for blk in blocks:
-      (await engine.localStore.putBlock(blk)).tryGet()
-
-    engine.network.request.sendPresence = sendPresence
-
-    # have block
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: present[0].address,
-        priority: 1,
-        cancel: false,
-        wantType: WantType.WantHave,
-        sendDontHave: false,
-      )
-    )
-
-    # have block
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: present[1].address,
-        priority: 1,
-        cancel: false,
-        wantType: WantType.WantHave,
-        sendDontHave: false,
-      )
-    )
-
-    # don't have block
-    peersCtx[0].peerWants.add(
-      WantListEntry(
-        address: missing[0].address,
-        priority: 1,
-        cancel: false,
-        wantType: WantType.WantHave,
-        sendDontHave: false,
-      )
-    )
+  test "Should not mark blocks for which local lookup fails as sent":
+    peersCtx[0].wantedBlocks.incl(blocks[0].address)

     await engine.taskHandler(peersCtx[0])
+
+    let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
+    check not peersCtx[0].isBlockSent(blockAddress)
diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim
index b9a51c9d..ab19a6ae 100644
--- a/tests/codex/blockexchange/testnetwork.nim
+++ b/tests/codex/blockexchange/testnetwork.nim
@@ -40,7 +40,7 @@ asyncchecksuite "Network - Handlers":
     done = newFuture[void]()
     buffer = BufferStream.new()
     network = BlockExcNetwork.new(switch = newStandardSwitch(), connProvider = getConn)
-    network.setupPeer(peerId)
+    await network.handlePeerJoined(peerId)
     networkPeer = network.peers[peerId]
     discard await networkPeer.connect()
diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim
index e2983d10..f348c1d5 100644
--- a/tests/codex/blockexchange/testpeerctxstore.nim
+++ b/tests/codex/blockexchange/testpeerctxstore.nim
@@ -81,8 +81,9 @@ suite "Peer Context Store Peer Selection":
       )
     )

-    peerCtxs[0].peerWants = entries
-    peerCtxs[5].peerWants = entries
+    for address in addresses:
+      peerCtxs[0].wantedBlocks.incl(address)
+      peerCtxs[5].wantedBlocks.incl(address)

     let peers = store.peersWant(addresses[4])
diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim
index 52b8a0b8..260adbfc 100644
--- a/tests/codex/examples.nim
+++ b/tests/codex/examples.nim
@@ -38,8 +38,7 @@ proc example*(_: type Pricing): Pricing =
   Pricing(address: EthAddress.example, price: uint32.rand.u256)

 proc example*(_: type bt.Block, size: int = 4096): bt.Block =
-  let length = rand(size)
-  let bytes = newSeqWith(length, rand(uint8))
+  let bytes = newSeqWith(size, rand(uint8))
   bt.Block.new(bytes).tryGet()

 proc example*(_: type PeerId): PeerId =
diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim
index 898dd16e..b855f412 100644
--- a/tests/codex/helpers.nim
+++ b/tests/codex/helpers.nim
@@ -12,13 +12,16 @@
 import pkg/codex/rng
 import pkg/codex/utils

 import ./helpers/nodeutils
+import ./helpers/datasetutils
 import ./helpers/randomchunker
 import ./helpers/mockchunker
 import ./helpers/mockdiscovery
 import ./helpers/always
 import ../checktest

-export randomchunker, nodeutils, mockdiscovery, mockchunker, always, checktest, manifest
+export
+  randomchunker, nodeutils, datasetutils, mockdiscovery, mockchunker, always, checktest,
+  manifest

 export libp2p except setup, eventually
@@ -46,23 +49,6 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =

   return buf

-proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, CodexTree) =
-  if blocks.len == 0:
-    return failure("Blocks list was empty")
-
-  let
-    datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
-    blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
-    tree = ?CodexTree.init(blocks.mapIt(it.cid))
-    treeCid = ?tree.rootCid
-    manifest = Manifest.new(
-      treeCid = treeCid,
-      blockSize = NBytes(blockSize),
-      datasetSize = NBytes(datasetSize),
-    )
-
-  return success((manifest, tree))
-
 proc makeWantList*(
     cids: seq[Cid],
     priority: int = 0,
@@ -91,7 +77,7 @@ proc storeDataGetManifest*(
     (await store.putBlock(blk)).tryGet()

   let
-    (manifest, tree) = makeManifestAndTree(blocks).tryGet()
+    (_, tree, manifest) = makeDataset(blocks).tryGet()
     treeCid = tree.rootCid.tryGet()

   for i in 0 ..< tree.leavesCount:
@@ -110,19 +96,6 @@ proc storeDataGetManifest*(

   return await storeDataGetManifest(store, blocks)

-proc makeRandomBlocks*(
-    datasetSize: int, blockSize: NBytes
-): Future[seq[Block]] {.async.} =
-  var chunker =
-    RandomChunker.new(Rng.instance(), size = datasetSize, chunkSize = blockSize)
-
-  while true:
-    let chunk = await chunker.getBytes()
-    if chunk.len <= 0:
-      break
-
-    result.add(Block.new(chunk).tryGet())
-
 proc corruptBlocks*(
     store: BlockStore, manifest: Manifest, blks, bytes: int
 ): Future[seq[int]] {.async.} =
@@ -147,4 +120,5 @@ proc corruptBlocks*(
       bytePos.add(ii)
       blk.data[ii] = byte 0

+  return pos
diff --git a/tests/codex/helpers/datasetutils.nim b/tests/codex/helpers/datasetutils.nim
new file mode 100644
index 00000000..56f26e34
--- /dev/null
+++ b/tests/codex/helpers/datasetutils.nim
@@ -0,0 +1,45 @@
+import std/random
+
+import pkg/chronos
+import pkg/codex/blocktype as bt
+import pkg/codex/merkletree
+import pkg/codex/manifest
+import pkg/codex/rng
+
+import ./randomchunker
+
+type TestDataset* = tuple[blocks: seq[Block], tree: CodexTree, manifest: Manifest]
+
+proc makeRandomBlock*(size: NBytes): Block =
+  let bytes = newSeqWith(size.int, rand(uint8))
+  Block.new(bytes).tryGet()
+
+proc makeRandomBlocks*(
+    datasetSize: int, blockSize: NBytes
+): Future[seq[Block]] {.async.} =
+  var chunker =
+    RandomChunker.new(Rng.instance(), size = datasetSize, chunkSize = blockSize)
+
+  while true:
+    let chunk = await chunker.getBytes()
+    if chunk.len <= 0:
+      break
+
+    result.add(Block.new(chunk).tryGet())
+
+proc makeDataset*(blocks: seq[Block]): ?!TestDataset =
+  if blocks.len == 0:
+    return failure("Blocks list was empty")
+
+  let
+    datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
+    blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
+    tree = ?CodexTree.init(blocks.mapIt(it.cid))
+    treeCid = ?tree.rootCid
+    manifest = Manifest.new(
+      treeCid = treeCid,
+      blockSize = NBytes(blockSize),
+      datasetSize = NBytes(datasetSize),
+    )
+
+  return success((blocks, tree, manifest))
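+
+# Usage sketch (illustrative only; this mirrors what `storeDataGetManifest`
+# in helpers.nim now does with these helpers):
+#
+#   let
+#     blocks = await makeRandomBlocks(datasetSize = 1024, blockSize = 256'nb)
+#     (_, tree, manifest) = makeDataset(blocks).tryGet()
+#   assert manifest.treeCid == tree.rootCid.tryGet()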
diff --git a/tests/codex/helpers/mockchunker.nim b/tests/codex/helpers/mockchunker.nim
index eb51f7ca..1ecd8a21 100644
--- a/tests/codex/helpers/mockchunker.nim
+++ b/tests/codex/helpers/mockchunker.nim
@@ -21,7 +21,7 @@ proc new*(
   var consumed = 0
   proc reader(
       data: ChunkBuffer, len: int
-  ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
+  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
     if consumed >= dataset.len:
       return 0
diff --git a/tests/codex/helpers/mockclock.nim b/tests/codex/helpers/mockclock.nim
index be1eb4d2..76446041 100644
--- a/tests/codex/helpers/mockclock.nim
+++ b/tests/codex/helpers/mockclock.nim
@@ -33,11 +33,18 @@ proc advance*(clock: MockClock, seconds: int64) =
 method now*(clock: MockClock): SecondsSince1970 =
   clock.time

-method waitUntil*(clock: MockClock, time: SecondsSince1970) {.async.} =
-  if time > clock.now():
-    let future = newFuture[void]()
-    clock.waiting.add(Waiting(until: time, future: future))
-    await future
+method waitUntil*(
+    clock: MockClock, time: SecondsSince1970
+) {.async: (raises: [CancelledError]).} =
+  try:
+    if time > clock.now():
+      let future = newFuture[void]()
+      clock.waiting.add(Waiting(until: time, future: future))
+      await future
+  except CancelledError as e:
+    raise e
+  except Exception:
+    discard

 proc isWaiting*(clock: MockClock): bool =
   clock.waiting.len > 0
diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim
index 4110c577..f0142650 100644
--- a/tests/codex/helpers/mockdiscovery.nim
+++ b/tests/codex/helpers/mockdiscovery.nim
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -70,3 +70,31 @@ method provide*(
     return

   await d.publishHostProvideHandler(d, host)
+
+proc nullDiscovery*(): MockDiscovery =
+  proc findBlockProvidersHandler(
+      d: MockDiscovery, cid: Cid
+  ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
+    return @[]
+
+  proc publishBlockProvideHandler(
+      d: MockDiscovery, cid: Cid
+  ): Future[void] {.async: (raises: [CancelledError]).} =
+    return
+
+  proc findHostProvidersHandler(
+      d: MockDiscovery, host: ca.Address
+  ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
+    return @[]
+
+  proc publishHostProvideHandler(
+      d: MockDiscovery, host: ca.Address
+  ): Future[void] {.async: (raises: [CancelledError]).} =
+    return
+
+  return MockDiscovery(
+    findBlockProvidersHandler: findBlockProvidersHandler,
+    publishBlockProvideHandler: publishBlockProvideHandler,
+    findHostProvidersHandler: findHostProvidersHandler,
+    publishHostProvideHandler: publishHostProvideHandler,
+  )
diff --git a/tests/codex/helpers/mockrepostore.nim b/tests/codex/helpers/mockrepostore.nim
index 52e598d9..3d29f4a3 100644
--- a/tests/codex/helpers/mockrepostore.nim
+++ b/tests/codex/helpers/mockrepostore.nim
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
diff --git a/tests/codex/helpers/mocktimer.nim b/tests/codex/helpers/mocktimer.nim
index 8d7a5911..162a6e81 100644
--- a/tests/codex/helpers/mocktimer.nim
+++ b/tests/codex/helpers/mocktimer.nim
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
diff --git a/tests/codex/helpers/nodeutils.nim b/tests/codex/helpers/nodeutils.nim
index 6d2edd46..12c38350 100644
--- a/tests/codex/helpers/nodeutils.nim
+++ b/tests/codex/helpers/nodeutils.nim
@@ -1,4 +1,5 @@
 import std/sequtils
+import std/sets

 import pkg/chronos
 import pkg/taskpools
@@ -12,10 +13,15 @@ import pkg/codex/blockexchange
 import pkg/codex/systemclock
 import pkg/codex/nat
 import pkg/codex/utils/natutils
+import pkg/codex/utils/safeasynciter
 import pkg/codex/slots
+import pkg/codex/merkletree
+import pkg/codex/manifest

 import pkg/codex/node

+import ./datasetutils
+import ./mockdiscovery
 import ../examples
 import ../../helpers

@@ -58,6 +64,7 @@ type
     basePort*: int = 8080
    createFullNode*: bool = false
     enableBootstrap*: bool = false
+    enableDiscovery*: bool = true

 converter toTuple*(
     nc: NodesComponents
@@ -90,6 +97,36 @@ proc localStores*(cluster: NodesCluster): seq[BlockStore] =

 proc switches*(cluster: NodesCluster): seq[Switch] =
   cluster.components.mapIt(it.switch)

+proc 
assignBlocks*( + node: NodesComponents, + dataset: TestDataset, + indices: seq[int], + putMerkleProofs = true, +): Future[void] {.async: (raises: [CatchableError]).} = + let rootCid = dataset.tree.rootCid.tryGet() + + for i in indices: + assert (await node.networkStore.putBlock(dataset.blocks[i])).isOk + if putMerkleProofs: + assert ( + await node.networkStore.putCidAndProof( + rootCid, i, dataset.blocks[i].cid, dataset.tree.getProof(i).tryGet() + ) + ).isOk + +proc assignBlocks*( + node: NodesComponents, + dataset: TestDataset, + indices: HSlice[int, int], + putMerkleProofs = true, +): Future[void] {.async: (raises: [CatchableError]).} = + await assignBlocks(node, dataset, indices.toSeq, putMerkleProofs) + +proc assignBlocks*( + node: NodesComponents, dataset: TestDataset, putMerkleProofs = true +): Future[void] {.async: (raises: [CatchableError]).} = + await assignBlocks(node, dataset, 0 ..< dataset.blocks.len, putMerkleProofs) + proc generateNodes*( num: Natural, blocks: openArray[bt.Block] = [], config: NodeConfig = NodeConfig() ): NodesCluster = @@ -145,13 +182,18 @@ proc generateNodes*( store = RepoStore.new(repoStore.newDb(), mdStore.newDb(), clock = SystemClock.new()) blockDiscoveryStore = bdStore.newDb() - discovery = Discovery.new( - switch.peerInfo.privateKey, - announceAddrs = @[listenAddr], - bindPort = bindPort.Port, - store = blockDiscoveryStore, - bootstrapNodes = bootstrapNodes, - ) + discovery = + if config.enableDiscovery: + Discovery.new( + switch.peerInfo.privateKey, + announceAddrs = @[listenAddr], + bindPort = bindPort.Port, + store = blockDiscoveryStore, + bootstrapNodes = bootstrapNodes, + ) + else: + nullDiscovery() + waitFor store.start() (store.BlockStore, @[bdStore, repoStore, mdStore], discovery) else: @@ -225,6 +267,26 @@ proc generateNodes*( return NodesCluster(components: components, taskpool: taskpool) +proc start*(nodes: NodesComponents) {.async: (raises: [CatchableError]).} = + await allFuturesThrowing( + nodes.switch.start(), + #nodes.blockDiscovery.start(), + nodes.engine.start(), + ) + +proc stop*(nodes: NodesComponents) {.async: (raises: [CatchableError]).} = + await allFuturesThrowing( + nodes.switch.stop(), + # nodes.blockDiscovery.stop(), + nodes.engine.stop(), + ) + +proc start*(nodes: seq[NodesComponents]) {.async: (raises: [CatchableError]).} = + await allFuturesThrowing(nodes.mapIt(it.start()).toSeq) + +proc stop*(nodes: seq[NodesComponents]) {.async: (raises: [CatchableError]).} = + await allFuturesThrowing(nodes.mapIt(it.stop()).toSeq) + proc connectNodes*(nodes: seq[Switch]) {.async.} = for dialer in nodes: for node in nodes: @@ -234,6 +296,15 @@ proc connectNodes*(nodes: seq[Switch]) {.async.} = proc connectNodes*(nodes: seq[NodesComponents]) {.async.} = await connectNodes(nodes.mapIt(it.switch)) +proc connectNodes*(nodes: varargs[NodesComponents]): Future[void] = + # varargs can't be captured on closures, and async procs are closures, + # so we have to do this mess + let copy = nodes.toSeq + ( + proc() {.async.} = + await connectNodes(copy.mapIt(it.switch)) + )() + proc connectNodes*(cluster: NodesCluster) {.async.} = await connectNodes(cluster.components) @@ -252,3 +323,26 @@ proc cleanup*(cluster: NodesCluster) {.async.} = await RepoStore(component.localStore).stop() cluster.taskpool.shutdown() + +proc linearTopology*(nodes: seq[NodesComponents]) {.async.} = + for i in 0 .. 
nodes.len - 2:
+    await connectNodes(nodes[i], nodes[i + 1])
+
+proc downloadDataset*(
+    node: NodesComponents, dataset: TestDataset
+): Future[void] {.async.} =
+  # This is the same as fetchBatched, but since we don't construct CodexNodes
+  # here, we can't use it directly.
+  let requestAddresses = collect:
+    for i in 0 ..< dataset.manifest.blocksCount:
+      BlockAddress.init(dataset.manifest.treeCid, i)
+
+  let blockCids = dataset.blocks.mapIt(it.cid).toHashSet()
+
+  var count = 0
+  for blockFut in (await node.networkStore.getBlocks(requestAddresses)):
+    let blk = (await blockFut).tryGet()
+    assert blk.cid in blockCids, "Unknown block CID: " & $blk.cid
+    count += 1
+
+  assert count == dataset.blocks.len, "Incorrect number of blocks downloaded"
diff --git a/tests/codex/merkletree/generictreetests.nim b/tests/codex/merkletree/generictreetests.nim
index 6244bc1c..e24cbad1 100644
--- a/tests/codex/merkletree/generictreetests.nim
+++ b/tests/codex/merkletree/generictreetests.nim
@@ -12,6 +12,13 @@ proc testGenericTree*[H, K, U](
   let data = @data

   suite "Correctness tests - " & name:
+    test "Should build correct tree for single leaf":
+      let expectedRoot = compress(data[0], zero, K.KeyOddAndBottomLayer)
+
+      let tree = makeTree(data[0 .. 0])
+      check:
+        tree.root.tryGet == expectedRoot
+
     test "Should build correct tree for even bottom layer":
       let expectedRoot = compress(
         compress(
diff --git a/tests/codex/merkletree/testcodextree.nim b/tests/codex/merkletree/testcodextree.nim
index 29390c16..16765dbb 100644
--- a/tests/codex/merkletree/testcodextree.nim
+++ b/tests/codex/merkletree/testcodextree.nim
@@ -1,6 +1,6 @@
 import std/sequtils
+import std/times

-import pkg/unittest2
 import pkg/questionable/results
 import pkg/stew/byteutils
 import pkg/libp2p
@@ -9,8 +9,11 @@ import pkg/codex/codextypes
 import pkg/codex/merkletree
 import pkg/codex/utils/digest

+import pkg/taskpools
+
 import ./helpers
 import ./generictreetests
+import ../../asynctest

 # TODO: Generalize to other hashes

@@ -43,9 +46,23 @@ suite "Test CodexTree":
      CodexTree.init(sha256, leaves = newSeq[ByteHash]()).isErr

   test "Should build tree from multihash leaves":
-    var expectedLeaves = data.mapIt(MultiHash.digest($sha256, it).tryGet())
+    var
+      expectedLeaves = data.mapIt(MultiHash.digest($sha256, it).tryGet())
+      tree = CodexTree.init(leaves = expectedLeaves)

-    var tree = CodexTree.init(leaves = expectedLeaves)
+    check:
+      tree.isOk
+      tree.get().leaves == expectedLeaves.mapIt(it.digestBytes)
+      tree.get().mcodec == sha256
+
+  test "Should build tree from multihash leaves asynchronously":
+    var tp = Taskpool.new(numThreads = 2)
+    defer:
+      tp.shutdown()
+
+    let expectedLeaves = data.mapIt(MultiHash.digest($sha256, it).tryGet())
+
+    let tree = (await CodexTree.init(tp, leaves = expectedLeaves))
     check:
       tree.isOk
       tree.get().leaves == expectedLeaves.mapIt(it.digestBytes)
       tree.get().mcodec == sha256
@@ -63,6 +80,48 @@ suite "Test CodexTree":
       tree.get().leaves == expectedLeaves.mapIt(it.mhash.tryGet.digestBytes)
       tree.get().mcodec == sha256

+  test "Should build tree from cid leaves asynchronously":
+    var tp = Taskpool.new(numThreads = 2)
+    defer:
+      tp.shutdown()
+
+    let expectedLeaves = data.mapIt(
+      Cid.init(CidVersion.CIDv1, BlockCodec, MultiHash.digest($sha256, it).tryGet).tryGet
+    )
+
+    let tree = (await CodexTree.init(tp, leaves = expectedLeaves))
+
+    check:
+      tree.isOk
+      tree.get().leaves == expectedLeaves.mapIt(it.mhash.tryGet.digestBytes)
+      tree.get().mcodec == sha256
+
+  test "Should build the same tree sync and async":
+    var tp = Taskpool.new(numThreads = 2)
+    defer:
+      tp.shutdown()
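+
+    # What this test guards (sketch): the Taskpool-backed async builder must
+    # produce exactly the same nodes and root as the serial builder,
+    # including the single-leaf case, whose root is computed separately
+    # (compare the single-leaf test in generictreetests above).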
+
+    let expectedLeaves = data.mapIt(
+      Cid.init(CidVersion.CIDv1, BlockCodec, MultiHash.digest($sha256, it).tryGet).tryGet
+    )
+
+    let
+      atree = (await CodexTree.init(tp, leaves = expectedLeaves))
+      stree = CodexTree.init(leaves = expectedLeaves)
+
+    check:
+      toSeq(atree.get().nodes) == toSeq(stree.get().nodes)
+      atree.get().root == stree.get().root
+
+    # Single-leaf trees have their root separately computed
+    let
+      atree1 = (await CodexTree.init(tp, leaves = expectedLeaves[0 .. 0]))
+      stree1 = CodexTree.init(leaves = expectedLeaves[0 .. 0])
+
+    check:
+      toSeq(atree1.get().nodes) == toSeq(stree1.get().nodes)
+      atree1.get().root == stree1.get().root
+
   test "Should build from raw digestbytes (should not hash leaves)":
     let tree = CodexTree.init(sha256, leaves = data).tryGet

@@ -70,6 +129,18 @@ suite "Test CodexTree":
       tree.mcodec == sha256
       tree.leaves == data

+  test "Should build from raw digestbytes (should not hash leaves) asynchronously":
+    var tp = Taskpool.new(numThreads = 2)
+    defer:
+      tp.shutdown()
+
+    let tree = (await CodexTree.init(tp, sha256, leaves = @data))
+
+    check:
+      tree.isOk
+      tree.get().mcodec == sha256
+      tree.get().leaves == data
+
   test "Should build from nodes":
     let tree = CodexTree.init(sha256, leaves = data).tryGet

@@ -82,10 +153,10 @@ suite "Test CodexTree":
     tree == fromNodes

 let
-  mhash = sha256.mhash().tryGet
-  zero: seq[byte] = newSeq[byte](mhash.size)
+  digestSize = sha256.digestSize.get
+  zero: seq[byte] = newSeq[byte](digestSize)
   compress = proc(x, y: seq[byte], key: ByteTreeKey): seq[byte] =
-    compress(x, y, key, mhash).tryGet
+    compress(x, y, key, sha256).tryGet

   makeTree = proc(data: seq[seq[byte]]): CodexTree =
     CodexTree.init(sha256, leaves = data).tryGet
diff --git a/tests/codex/merkletree/testposeidon2tree.nim b/tests/codex/merkletree/testposeidon2tree.nim
index e12751b7..8e7ce34d 100644
--- a/tests/codex/merkletree/testposeidon2tree.nim
+++ b/tests/codex/merkletree/testposeidon2tree.nim
@@ -1,6 +1,5 @@
 import std/sequtils

-import pkg/unittest2
 import pkg/poseidon2
 import pkg/poseidon2/io
 import pkg/questionable/results
@@ -9,9 +8,11 @@ import pkg/stew/byteutils
 import pkg/stew/arrayops

 import pkg/codex/merkletree
+import pkg/taskpools

 import ./generictreetests
 import ./helpers
+import ../../asynctest

 const data = [
   "0000000000000000000000000000001".toBytes,
@@ -36,13 +37,14 @@ suite "Test Poseidon2Tree":
     check:
       Poseidon2Tree.init(leaves = newSeq[Poseidon2Hash](0)).isErr

-  test "Init tree from poseidon2 leaves":
-    let tree = Poseidon2Tree.init(leaves = expectedLeaves).tryGet
+  test "Build tree from poseidon2 leaves":
+    var taskpool = Taskpool.new(numThreads = 2)
+    let tree = (await Poseidon2Tree.init(taskpool, leaves = expectedLeaves)).tryGet()

     check:
       tree.leaves == expectedLeaves

-  test "Init tree from byte leaves":
+  test "Build tree from byte leaves":
     let tree = Poseidon2Tree.init(
       leaves = expectedLeaves.mapIt(array[31, byte].initCopyFrom(it.toBytes))
     ).tryGet
@@ -50,7 +52,7 @@ suite "Test Poseidon2Tree":
     check:
       tree.leaves == expectedLeaves

-  test "Should build from nodes":
+  test "Build tree from nodes":
     let
       tree = Poseidon2Tree.init(leaves = expectedLeaves).tryGet
       fromNodes = Poseidon2Tree.fromNodes(
@@ -60,6 +62,29 @@ suite "Test Poseidon2Tree":
     check:
       tree == fromNodes

+  test "Build poseidon2 tree from poseidon2 leaves asynchronously":
+    var tp = Taskpool.new()
+    defer:
+      tp.shutdown()
+
+    let tree = (await Poseidon2Tree.init(tp, leaves = expectedLeaves)).tryGet()
+    check:
+      tree.leaves == expectedLeaves
+
asynchronously": + var tp = Taskpool.new() + defer: + tp.shutdown() + + let tree = ( + await Poseidon2Tree.init( + tp, leaves = expectedLeaves.mapIt(array[31, byte].initCopyFrom(it.toBytes)) + ) + ).tryGet() + + check: + tree.leaves == expectedLeaves + let compressor = proc( x, y: Poseidon2Hash, key: PoseidonKeysEnum diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index e8d9c743..fcb91e8f 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -56,12 +56,13 @@ asyncchecksuite "Test Node - Host contracts": verifiable: Manifest verifiableBlock: bt.Block protected: Manifest + taskPool: Taskpool setup: # Setup Host Contracts and dependencies market = MockMarket.new() sales = Sales.new(market, clock, localStore) - + taskPool = Taskpool.new() node.contracts = ( none ClientInteractions, some HostInteractions.new(clock, sales), @@ -75,20 +76,23 @@ asyncchecksuite "Test Node - Host contracts": let manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskPool) manifestCid = manifestBlock.cid (await localStore.putBlock(manifestBlock)).tryGet() protected = (await erasure.encode(manifest, 3, 2)).tryGet() - builder = Poseidon2Builder.new(localStore, protected).tryGet() + builder = Poseidon2Builder.new(localStore, protected, taskPool).tryGet() verifiable = (await builder.buildManifest()).tryGet() verifiableBlock = bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() (await localStore.putBlock(verifiableBlock)).tryGet() + teardown: + taskPool.shutdown() + test "onExpiryUpdate callback is set": check sales.onExpiryUpdate.isSome diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index a3fe7e94..81460c79 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -47,10 +47,15 @@ privateAccess(CodexNodeRef) # enable access to private fields asyncchecksuite "Test Node - Basic": setupAndTearDown() + var taskPool: Taskpool setup: + taskPool = Taskpool.new() await node.start() + teardown: + taskPool.shutdown() + test "Fetch Manifest": let manifest = await storeDataGetManifest(localStore, chunker) @@ -75,14 +80,14 @@ asyncchecksuite "Test Node - Basic": batchSize = batchSize, proc( blocks: seq[bt.Block] - ): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).} = + ): Future[?!void] {.async: (raises: [CancelledError]).} = check blocks.len > 0 and blocks.len <= batchSize return success(), ) ).tryGet() test "Block Batching with corrupted blocks": - let blocks = await makeRandomBlocks(datasetSize = 64.KiBs.int, blockSize = 64.KiBs) + let blocks = await makeRandomBlocks(datasetSize = 65536, blockSize = 64.KiBs) assert blocks.len == 1 let blk = blocks[0] @@ -100,7 +105,7 @@ asyncchecksuite "Test Node - Basic": batchSize = batchSize, proc( blocks: seq[bt.Block] - ): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).} = + ): Future[?!void] {.async: (raises: [CancelledError]).} = return failure("Should not be called"), ) ) @@ -173,14 +178,15 @@ asyncchecksuite "Test Node - Basic": check string.fromBytes(data) == testString test "Setup purchase request": + echo "Here the tedt" let - erasure = - Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new()) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskPool) manifest = await 
storeDataGetManifest(localStore, chunker) manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() protected = (await erasure.encode(manifest, 3, 2)).tryGet() - builder = Poseidon2Builder.new(localStore, protected).tryGet() + let + builder = Poseidon2Builder.new(localStore, protected, taskPool).tryGet() verifiable = (await builder.buildManifest()).tryGet() verifiableBlock = bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() diff --git a/tests/codex/node/testslotrepair.nim b/tests/codex/node/testslotrepair.nim index d96078d2..f06602e8 100644 --- a/tests/codex/node/testslotrepair.nim +++ b/tests/codex/node/testslotrepair.nim @@ -13,6 +13,7 @@ import pkg/codex/contracts import pkg/codex/slots import pkg/codex/manifest import pkg/codex/erasure +import pkg/taskpools import pkg/codex/blocktype as bt import pkg/chronos/transports/stream @@ -48,6 +49,7 @@ asyncchecksuite "Test Node - Slot Repair": findFreePorts: true, createFullNode: true, enableBootstrap: true, + enableDiscovery: true, ) var manifest: Manifest @@ -100,7 +102,7 @@ asyncchecksuite "Test Node - Slot Repair": (await localStore.putBlock(manifestBlock)).tryGet() protected = (await erasure.encode(manifest, ecK, ecM)).tryGet() - builder = Poseidon2Builder.new(localStore, protected).tryGet() + builder = Poseidon2Builder.new(localStore, protected, cluster.taskpool).tryGet() verifiable = (await builder.buildManifest()).tryGet() verifiableBlock = bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() @@ -118,6 +120,7 @@ asyncchecksuite "Test Node - Slot Repair": await nodes[1].switch.stop() # slot 0 missing now # repair missing slot + (await nodes[4].onStore(request, expiry, 0.uint64, nil, isRepairing = true)).tryGet() await nodes[2].switch.stop() # slot 1 missing now @@ -131,16 +134,19 @@ asyncchecksuite "Test Node - Slot Repair": await nodes[4].switch.stop() # slot 0 missing now # repair missing slot from repaired slots + (await nodes[7].onStore(request, expiry, 0.uint64, nil, isRepairing = true)).tryGet() await nodes[5].switch.stop() # slot 1 missing now # repair missing slot from repaired slots + (await nodes[8].onStore(request, expiry, 1.uint64, nil, isRepairing = true)).tryGet() await nodes[6].switch.stop() # slot 2 missing now # repair missing slot from repaired slots + (await nodes[9].onStore(request, expiry, 2.uint64, nil, isRepairing = true)).tryGet() let @@ -179,7 +185,7 @@ asyncchecksuite "Test Node - Slot Repair": (await localStore.putBlock(manifestBlock)).tryGet() protected = (await erasure.encode(manifest, ecK, ecM)).tryGet() - builder = Poseidon2Builder.new(localStore, protected).tryGet() + builder = Poseidon2Builder.new(localStore, protected, cluster.taskpool).tryGet() verifiable = (await builder.buildManifest()).tryGet() verifiableBlock = bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() @@ -198,19 +204,24 @@ asyncchecksuite "Test Node - Slot Repair": await nodes[3].switch.stop() # slot 2 missing now # repair missing slots + (await nodes[6].onStore(request, expiry, 0.uint64, nil, isRepairing = true)).tryGet() + (await nodes[7].onStore(request, expiry, 2.uint64, nil, isRepairing = true)).tryGet() await nodes[2].switch.stop() # slot 1 missing now await nodes[4].switch.stop() # slot 3 missing now # repair missing slots from repaired slots + (await nodes[8].onStore(request, expiry, 1.uint64, nil, isRepairing = true)).tryGet() + (await nodes[9].onStore(request, expiry, 3.uint64, nil, isRepairing = true)).tryGet() await 
nodes[5].switch.stop() # slot 4 missing now # repair missing slot from repaired slots + (await nodes[10].onStore(request, expiry, 4.uint64, nil, isRepairing = true)).tryGet() let diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index 48bdee9c..7d958ee3 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -342,9 +342,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability is created": var added: Availability - reservations.OnAvailabilitySaved = proc( - a: Availability - ) {.gcsafe, async: (raises: []).} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = added = a let availability = createAvailability() @@ -354,9 +352,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability size is increased": var availability = createAvailability() var added: Availability - reservations.OnAvailabilitySaved = proc( - a: Availability - ) {.gcsafe, async: (raises: []).} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = added = a availability.freeSize += 1 discard await reservations.update(availability) @@ -366,9 +362,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved is not called when availability size is decreased": var availability = createAvailability() var called = false - reservations.OnAvailabilitySaved = proc( - a: Availability - ) {.gcsafe, async: (raises: []).} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = called = true availability.freeSize -= 1.uint64 discard await reservations.update(availability) @@ -378,9 +372,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved is not called when availability is disabled": var availability = createAvailability(enabled = false) var called = false - reservations.OnAvailabilitySaved = proc( - a: Availability - ) {.gcsafe, async: (raises: []).} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = called = true availability.freeSize -= 1 discard await reservations.update(availability) diff --git a/tests/codex/slots/backends/testcircomcompat.nim b/tests/codex/slots/backends/testcircomcompat.nim index b61d4f18..637ee36b 100644 --- a/tests/codex/slots/backends/testcircomcompat.nim +++ b/tests/codex/slots/backends/testcircomcompat.nim @@ -3,6 +3,7 @@ import std/options import ../../../asynctest import pkg/chronos +import pkg/taskpools import pkg/poseidon2 import pkg/serde/json @@ -77,6 +78,7 @@ suite "Test Circom Compat Backend": challenge: array[32, byte] builder: Poseidon2Builder sampler: Poseidon2Sampler + taskPool: Taskpool setup: let @@ -85,11 +87,13 @@ suite "Test Circom Compat Backend": store = RepoStore.new(repoDs, metaDs) + taskPool = Taskpool.new() + (manifest, protected, verifiable) = await createVerifiableManifest( - store, numDatasetBlocks, ecK, ecM, blockSize, cellSize + store, numDatasetBlocks, ecK, ecM, blockSize, cellSize, taskPool ) - builder = Poseidon2Builder.new(store, verifiable).tryGet + builder = Poseidon2Builder.new(store, verifiable, taskPool).tryGet sampler = Poseidon2Sampler.new(slotId, store, builder).tryGet circom = CircomCompat.init(r1cs, wasm, zkey) @@ -101,6 +105,7 @@ suite "Test Circom Compat Backend": circom.release() # this comes from the rust FFI await repoTmp.destroyDb() await metaTmp.destroyDb() + taskPool.shutdown() test "Should verify with correct input": var proof = 
circom.prove(proofInputs).tryGet diff --git a/tests/codex/slots/helpers.nim b/tests/codex/slots/helpers.nim index fced1f1c..01159c21 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -12,6 +12,7 @@ import pkg/codex/chunker import pkg/codex/indexingstrategy import pkg/codex/slots import pkg/codex/rng +import pkg/taskpools import ../helpers @@ -145,6 +146,7 @@ proc createVerifiableManifest*( ecM: int, blockSize: NBytes, cellSize: NBytes, + taskPool: Taskpool, ): Future[tuple[manifest: Manifest, protected: Manifest, verifiable: Manifest]] {. async .} = @@ -165,7 +167,9 @@ proc createVerifiableManifest*( totalDatasetSize, ) - builder = Poseidon2Builder.new(store, protectedManifest, cellSize = cellSize).tryGet + builder = Poseidon2Builder.new( + store, protectedManifest, cellSize = cellSize, taskPool = taskPool + ).tryGet verifiableManifest = (await builder.buildManifest()).tryGet # build the slots and manifest diff --git a/tests/codex/slots/sampler/testsampler.nim b/tests/codex/slots/sampler/testsampler.nim index 78b245a3..bf7277a3 100644 --- a/tests/codex/slots/sampler/testsampler.nim +++ b/tests/codex/slots/sampler/testsampler.nim @@ -5,6 +5,7 @@ import ../../../asynctest import pkg/questionable/results +import pkg/taskpools import pkg/codex/stores import pkg/codex/merkletree import pkg/codex/utils/json @@ -26,11 +27,16 @@ suite "Test Sampler - control samples": inputData: string inputJson: JsonNode proofInput: ProofInputs[Poseidon2Hash] + taskpool: Taskpool setup: inputData = readFile("tests/circuits/fixtures/input.json") inputJson = !JsonNode.parse(inputData) proofInput = Poseidon2Hash.jsonToProofInput(inputJson) + taskpool = Taskpool.new() + + teardown: + taskpool.shutdown() test "Should verify control samples": let @@ -87,25 +93,29 @@ suite "Test Sampler": manifest: Manifest protected: Manifest verifiable: Manifest + taskpool: Taskpool setup: let repoDs = repoTmp.newDb() metaDs = metaTmp.newDb() + taskpool = Taskpool.new() + store = RepoStore.new(repoDs, metaDs) (manifest, protected, verifiable) = await createVerifiableManifest( - store, datasetBlocks, ecK, ecM, blockSize, cellSize + store, datasetBlocks, ecK, ecM, blockSize, cellSize, taskpool ) # create sampler - builder = Poseidon2Builder.new(store, verifiable).tryGet + builder = Poseidon2Builder.new(store, verifiable, taskpool).tryGet teardown: await store.close() await repoTmp.destroyDb() await metaTmp.destroyDb() + taskpool.shutdown() test "Should fail instantiating for invalid slot index": let sampler = Poseidon2Sampler.new(builder.slotRoots.len, store, builder) @@ -114,7 +124,7 @@ suite "Test Sampler": test "Should fail instantiating for non verifiable builder": let - nonVerifiableBuilder = Poseidon2Builder.new(store, protected).tryGet + nonVerifiableBuilder = Poseidon2Builder.new(store, protected, taskpool).tryGet sampler = Poseidon2Sampler.new(slotIndex, store, nonVerifiableBuilder) check sampler.isErr diff --git a/tests/codex/slots/testprover.nim b/tests/codex/slots/testprover.nim index c567db55..34ff96ba 100644 --- a/tests/codex/slots/testprover.nim +++ b/tests/codex/slots/testprover.nim @@ -4,6 +4,7 @@ import pkg/chronos import pkg/libp2p/cid import pkg/codex/merkletree +import pkg/taskpools import pkg/codex/chunker import pkg/codex/blocktype as bt import pkg/codex/slots @@ -29,6 +30,7 @@ suite "Test Prover": var store: BlockStore prover: Prover + taskPool: Taskpool setup: let @@ -45,13 +47,14 @@ suite "Test Prover": numProofSamples: samples, ) backend = config.initializeBackend().tryGet() - + 
taskPool = Taskpool.new() store = RepoStore.new(repoDs, metaDs) - prover = Prover.new(store, backend, config.numProofSamples) + prover = Prover.new(store, backend, config.numProofSamples, taskPool) teardown: await repoTmp.destroyDb() await metaTmp.destroyDb() + taskPool.shutdown() test "Should sample and prove a slot": let (_, _, verifiable) = await createVerifiableManifest( @@ -61,6 +64,7 @@ suite "Test Prover": 3, # ecM blockSize, cellSize, + taskPool, ) let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet @@ -80,6 +84,7 @@ suite "Test Prover": 1, # ecM blockSize, cellSize, + taskPool, ) let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim index fc3c7bd5..55f917ef 100644 --- a/tests/codex/slots/testslotbuilder.nim +++ b/tests/codex/slots/testslotbuilder.nim @@ -15,6 +15,7 @@ import pkg/codex/utils import pkg/codex/utils/digest import pkg/poseidon2 import pkg/poseidon2/io +import pkg/taskpools import ./helpers import ../helpers @@ -72,12 +73,13 @@ suite "Slot builder": protectedManifest: Manifest builder: Poseidon2Builder chunker: Chunker + taskPool: Taskpool setup: let repoDs = repoTmp.newDb() metaDs = metaTmp.newDb() - + taskPool = Taskpool.new() localStore = RepoStore.new(repoDs, metaDs) chunker = RandomChunker.new(Rng.instance(), size = totalDatasetSize, chunkSize = blockSize) @@ -92,6 +94,7 @@ suite "Slot builder": await localStore.close() await repoTmp.destroyDb() await metaTmp.destroyDb() + taskPool.shutdown() # TODO: THIS IS A BUG IN asynctest, because it doesn't release the # objects after the test is done, so we need to do it manually @@ -113,8 +116,9 @@ suite "Slot builder": ) check: - Poseidon2Builder.new(localStore, unprotectedManifest, cellSize = cellSize).error.msg == - "Manifest is not protected." + Poseidon2Builder.new( + localStore, unprotectedManifest, taskPool, cellSize = cellSize + ).error.msg == "Manifest is not protected." test "Number of blocks must be devisable by number of slots": let mismatchManifest = Manifest.new( @@ -131,7 +135,7 @@ suite "Slot builder": ) check: - Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == + Poseidon2Builder.new(localStore, mismatchManifest, taskPool, cellSize = cellSize).error.msg == "Number of blocks must be divisible by number of slots." test "Block size must be divisable by cell size": @@ -149,12 +153,13 @@ suite "Slot builder": ) check: - Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == + Poseidon2Builder.new(localStore, mismatchManifest, taskPool, cellSize = cellSize).error.msg == "Block size must be divisible by cell size." 
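+
+  # Recurring pattern across these suites (sketch; not part of this hunk's
+  # assertions): a single Taskpool is created in `setup`, threaded through
+  # every `Poseidon2Builder.new(...)` call, and released in `teardown`:
+  #
+  #   setup:
+  #     taskPool = Taskpool.new()
+  #   teardown:
+  #     taskPool.shutdown()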
test "Should build correct slot builder": - builder = - Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) + .tryGet() check: builder.cellSize == cellSize @@ -171,7 +176,7 @@ suite "Slot builder": ) builder = Poseidon2Builder - .new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() for i in 0 ..< numSlots: @@ -196,7 +201,7 @@ suite "Slot builder": ) builder = Poseidon2Builder - .new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() for i in 0 ..< numSlots: @@ -215,8 +220,9 @@ suite "Slot builder": slotTree.root().tryGet() == expectedRoot test "Should persist trees for all slots": - let builder = - Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() + let builder = Poseidon2Builder + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) + .tryGet() for i in 0 ..< numSlots: let @@ -242,7 +248,7 @@ suite "Slot builder": 0, protectedManifest.blocksCount - 1, numSlots, numSlots, numPadSlotBlocks ) builder = Poseidon2Builder - .new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() (await builder.buildSlots()).tryGet @@ -270,7 +276,7 @@ suite "Slot builder": 0, protectedManifest.blocksCount - 1, numSlots, numSlots, numPadSlotBlocks ) builder = Poseidon2Builder - .new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() slotsHashes = collect(newSeq): @@ -296,45 +302,53 @@ suite "Slot builder": test "Should not build from verifiable manifest with 0 slots": var builder = Poseidon2Builder - .new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() verifyManifest.slotRoots = @[] - check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr + check Poseidon2Builder.new( + localStore, verifyManifest, taskPool, cellSize = cellSize + ).isErr test "Should not build from verifiable manifest with incorrect number of slots": var builder = Poseidon2Builder - .new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() verifyManifest.slotRoots.del(verifyManifest.slotRoots.len - 1) - check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr + check Poseidon2Builder.new( + localStore, verifyManifest, taskPool, cellSize = cellSize + ).isErr test "Should not build from verifiable manifest with invalid verify root": - let builder = - Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() + let builder = Poseidon2Builder + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) + .tryGet() var verifyManifest = (await builder.buildManifest()).tryGet() rng.shuffle(Rng.instance, verifyManifest.verifyRoot.data.buffer) - check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr + check Poseidon2Builder.new( + localStore, verifyManifest, taskPool, cellSize = cellSize + ).isErr test "Should build from verifiable manifest": let builder = Poseidon2Builder - 
.new(localStore, protectedManifest, cellSize = cellSize) + .new(localStore, protectedManifest, taskPool, cellSize = cellSize) .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() - verificationBuilder = - Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).tryGet() + verificationBuilder = Poseidon2Builder + .new(localStore, verifyManifest, taskPool, cellSize = cellSize) + .tryGet() check: builder.slotRoots == verificationBuilder.slotRoots diff --git a/tests/codex/stores/commonstoretests.nim b/tests/codex/stores/commonstoretests.nim index e4287dd2..d3132773 100644 --- a/tests/codex/stores/commonstoretests.nim +++ b/tests/codex/stores/commonstoretests.nim @@ -38,8 +38,8 @@ proc commonBlockStoreTests*( newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet() newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet() - (manifest, tree) = - makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() + (_, tree, manifest) = + makeDataset(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() if not isNil(before): await before() diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index f1bff8bd..799e1884 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index f56adcc3..7c979d4c 100644 --- a/tests/codex/stores/testmaintenance.nim +++ b/tests/codex/stores/testmaintenance.nim @@ -1,4 +1,4 @@ -## Nim-Codex +## Logos Storage ## Copyright (c) 2023 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index 69f38711..7eb9fd0d 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -364,9 +364,11 @@ asyncchecksuite "RepoStore": let repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = 1000'nb) - dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) - blk = dataset[0] - (manifest, tree) = makeManifestAndTree(dataset).tryGet() + (blocks, tree, manifest) = makeDataset( + await makeRandomBlocks(datasetSize = 2 * 256, blockSize = 256'nb) + ) + .tryGet() + blk = blocks[0] treeCid = tree.rootCid.tryGet() proof = tree.getProof(0).tryGet() @@ -381,9 +383,11 @@ asyncchecksuite "RepoStore": let repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = 1000'nb) - dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) - blk = dataset[0] - (manifest, tree) = makeManifestAndTree(dataset).tryGet() + (blocks, tree, manifest) = makeDataset( + await makeRandomBlocks(datasetSize = 2 * 256, blockSize = 256'nb) + ) + .tryGet() + blk = blocks[0] treeCid = tree.rootCid.tryGet() proof = tree.getProof(0).tryGet() @@ -406,9 +410,9 @@ asyncchecksuite "RepoStore": let sharedBlock = blockPool[1] let - (manifest1, tree1) = makeManifestAndTree(dataset1).tryGet() + (_, tree1, manifest1) = makeDataset(dataset1).tryGet() treeCid1 = tree1.rootCid.tryGet() - (manifest2, tree2) = makeManifestAndTree(dataset2).tryGet() + (_, tree2, manifest2) = makeDataset(dataset2).tryGet() treeCid2 = tree2.rootCid.tryGet() (await repo.putBlock(sharedBlock)).tryGet() @@ -435,9 
diff --git a/tests/codex/testblocktype.nim b/tests/codex/testblocktype.nim
new file mode 100644
index 00000000..b0ea2732
--- /dev/null
+++ b/tests/codex/testblocktype.nim
@@ -0,0 +1,44 @@
+import pkg/unittest2
+import pkg/libp2p/cid
+
+import pkg/codex/blocktype
+
+import ./examples
+
+suite "blocktype":
+  test "should hash equal non-leaf block addresses onto the same hash":
+    let
+      cid1 = Cid.example
+      nonLeaf1 = BlockAddress.init(cid1)
+      nonLeaf2 = BlockAddress.init(cid1)
+
+    check nonLeaf1 == nonLeaf2
+    check nonLeaf1.hash == nonLeaf2.hash
+
+  test "should hash equal leaf block addresses onto the same hash":
+    let
+      cid1 = Cid.example
+      leaf1 = BlockAddress.init(cid1, 0)
+      leaf2 = BlockAddress.init(cid1, 0)
+
+    check leaf1 == leaf2
+    check leaf1.hash == leaf2.hash
+
+  test "should hash different non-leaf block addresses onto different hashes":
+    let
+      cid1 = Cid.example
+      cid2 = Cid.example
+      nonLeaf1 = BlockAddress.init(cid1)
+      nonLeaf2 = BlockAddress.init(cid2)
+
+    check nonLeaf1 != nonLeaf2
+    check nonLeaf1.hash != nonLeaf2.hash
+
+  test "should hash different leaf block addresses onto different hashes":
+    let
+      cid1 = Cid.example
+      leaf1 = BlockAddress.init(cid1, 0)
+      leaf2 = BlockAddress.init(cid1, 1)
+
+    check leaf1 != leaf2
+    check leaf1.hash != leaf2.hash
diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim
index 44202c40..531d92bf 100644
--- a/tests/codex/testchunking.nim
+++ b/tests/codex/testchunking.nim
@@ -18,7 +18,7 @@ type CrashingStreamWrapper* = ref object of LPStream
 
 method readOnce*(
     self: CrashingStreamWrapper, pbytes: pointer, nbytes: int
-): Future[int] {.gcsafe, async: (raises: [CancelledError, LPStreamError]).} =
+): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
   self.toRaise()
 
 asyncchecksuite "Chunking":
@@ -27,7 +27,7 @@ asyncchecksuite "Chunking":
     let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0]
     proc reader(
         data: ChunkBuffer, len: int
-    ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
+    ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
       let read = min(contents.len - offset, len)
       if read == 0:
         return 0
diff --git a/tests/codex/testnat.nim b/tests/codex/testnat.nim
index 3981b2e6..a4161c6f 100644
--- a/tests/codex/testnat.nim
+++ b/tests/codex/testnat.nim
@@ -1,4 +1,4 @@
-import std/[unittest, options, net], stew/shims/net as stewNet
+import std/[unittest, options, net]
 import pkg/chronos
 import pkg/libp2p/[multiaddress, multihash, multicodec]
 import pkg/results
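The new testblocktype.nim suite above pins down a hashing contract for BlockAddress: equality and hash must agree, and the leaf index must participate in both. A minimal, self-contained sketch of a variant type that satisfies that contract, using std/hashes; the type below is an illustrative stand-in, not the real BlockAddress, which wraps Cid values rather than strings:

    import std/hashes

    type
      ExampleAddress = object # illustrative stand-in for BlockAddress
        case leaf: bool
        of true:
          treeCid: string # the real type stores a Cid here
          index: Natural
        of false:
          cid: string

    proc hash(a: ExampleAddress): Hash =
      # Mix the discriminator first, then the branch-specific fields, so that
      # equal addresses hash equally and the leaf index changes the hash.
      var h = hash(a.leaf)
      if a.leaf:
        h = h !& hash(a.treeCid) !& hash(a.index)
      else:
        h = h !& hash(a.cid)
      !$h

    when isMainModule:
      let a = ExampleAddress(leaf: true, treeCid: "tree", index: 0)
      let b = ExampleAddress(leaf: true, treeCid: "tree", index: 1)
      assert a.hash != b.hash # differing leaf indices give differing hashes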
diff --git a/tests/codex/utils/testPoseidon.nim b/tests/codex/utils/testPoseidon.nim
new file mode 100644
index 00000000..aedf5fcf
--- /dev/null
+++ b/tests/codex/utils/testPoseidon.nim
@@ -0,0 +1,40 @@
+{.push raises: [].}
+
+import std/[times, strformat, random]
+import pkg/questionable/results
+
+import pkg/codex/merkletree/poseidon2
+
+import pkg/codex/utils/poseidon2digest
+import ../../asynctest
+
+test "Test poseidon2 digestTree":
+  randomize(42)
+  const
+    dataSize = 64 * 1024 # 64KB
+    chunkSize = 2 * 1024 # 2KB
+    iterations = 10 # Number of iterations
+
+  echo &"Benchmarking digestTree with data size: {dataSize} bytes, chunk size: {chunkSize} bytes"
+
+  # Generate random data
+  var data = newSeq[byte](dataSize)
+  for i in 0 ..< dataSize:
+    data[i] = byte(rand(255))
+
+  # Actual benchmark
+  let startTime = cpuTime()
+
+  for i in 1 .. iterations:
+    let treeResult = Poseidon2Tree.digestTree(data, chunkSize).tryGet()
+
+    # Optionally print info about each iteration
+
+  let endTime = cpuTime()
+  let totalTime = endTime - startTime
+  let avgTime = totalTime / iterations.float
+
+  echo &"Results:"
+  echo &"  Total time for {iterations} iterations: {totalTime:.6f} seconds"
+  echo &"  Average time per iteration: {avgTime:.6f} seconds"
+  echo &"  Iterations per second: {iterations.float / totalTime:.2f}"
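One caveat on the benchmark above: cpuTime() sums CPU time for the process, which is fine for a single-threaded digest but diverges from wall-clock time once hashing fans out to worker threads (as the slot-builder taskPool changes suggest it may). If wall-clock numbers are wanted, std/monotimes is the usual choice; a small sketch, assuming nothing beyond the standard library (the benchMs template is ours, not part of the patch):

    import std/[monotimes, times]

    template benchMs(label: string, body: untyped) =
      # Wall-clock timing with a monotonic clock, immune to system clock jumps.
      let start = getMonoTime()
      body
      echo label, ": ", (getMonoTime() - start).inMilliseconds, " ms"

    when isMainModule:
      benchMs "sum of first million ints":
        var acc = 0
        for i in 0 ..< 1_000_000:
          acc += i
        doAssert acc == 499_999_500_000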
diff --git a/tests/codex/utils/testasyncstatemachine.nim b/tests/codex/utils/testasyncstatemachine.nim
index ed3ea747..c46bb605 100644
--- a/tests/codex/utils/testasyncstatemachine.nim
+++ b/tests/codex/utils/testasyncstatemachine.nim
@@ -1,6 +1,5 @@
 import pkg/questionable
 import pkg/chronos
-import pkg/upraises
 
 import codex/utils/asyncstatemachine
 import ../../asynctest
@@ -36,7 +35,7 @@ method run(state: State2, machine: Machine): Future[?State] {.async: (raises: []
 method run(state: State3, machine: Machine): Future[?State] {.async: (raises: []).} =
   inc runs[2]
 
-method onMoveToNextStateEvent*(state: State): ?State {.base, upraises: [].} =
+method onMoveToNextStateEvent*(state: State): ?State {.base, raises: [].} =
   discard
 
 method onMoveToNextStateEvent(state: State2): ?State =
diff --git a/tests/codex/utils/testsafeasynciter.nim b/tests/codex/utils/testsafeasynciter.nim
index 1aeba4d2..87b0d84a 100644
--- a/tests/codex/utils/testsafeasynciter.nim
+++ b/tests/codex/utils/testsafeasynciter.nim
@@ -373,7 +373,7 @@ asyncchecksuite "Test SafeAsyncIter":
     # Now, to make sure that this mechanism works, and to document its
     # cancellation semantics, this test shows that when the async predicate
    # function is cancelled, this cancellation has immediate effect, which means
-    # that `next()` (or more precisely `getNext()` in `mapFilter` function), is 
+    # that `next()` (or more precisely `getNext()` in `mapFilter` function), is
    # interrupted immediately. If this is the case, then the iterator will be interrupted
     # before `next()` returns this locally captured value from the previous
     # iteration and this is exactly the reason why at the end of the test
@@ -415,3 +415,20 @@ asyncchecksuite "Test SafeAsyncIter":
       # will not be returned because of the cancellation.
       collected == @["0", "1"]
       iter2.finished
+
+  test "should allow chaining":
+    let
+      iter1 = SafeAsyncIter[int].new(0 ..< 5)
+      iter2 = SafeAsyncIter[int].new(5 ..< 10)
+      iter3 = chain[int](iter1, SafeAsyncIter[int].empty, iter2)
+
+    var collected: seq[int]
+
+    for fut in iter3:
+      without i =? (await fut), err:
+        fail()
+      collected.add(i)
+
+    check:
+      iter3.finished
+      collected == @[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
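The chaining test above documents the contract of the new chain combinator: sources are drained in argument order, empty iterators contribute nothing, and finished only holds after the last element. For intuition, the same contract sketched with plain closure iterators instead of the async, Result-typed SafeAsyncIter; the chained helper below is illustrative only:

    type IntIter = iterator (): int {.closure.}

    proc toIter(s: seq[int]): IntIter =
      # Wrap a seq in a lazy closure iterator.
      result = iterator (): int =
        for x in s:
          yield x

    proc chained(iters: seq[IntIter]): IntIter =
      # Drain each inner iterator in order; empty ones simply yield nothing.
      result = iterator (): int =
        for it in iters:
          for x in it():
            yield x

    when isMainModule:
      let joined = chained(
        @[toIter(@[0, 1, 2, 3, 4]), toIter(newSeq[int]()), toIter(@[5, 6, 7, 8, 9])]
      )
      var collected: seq[int]
      for x in joined():
        collected.add x
      doAssert collected == @[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]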
diff --git a/tests/codex/utils/testtimer.nim b/tests/codex/utils/testtimer.nim
index 47076480..cf05df19 100644
--- a/tests/codex/utils/testtimer.nim
+++ b/tests/codex/utils/testtimer.nim
@@ -1,4 +1,4 @@
-## Nim-Codex
+## Logos Storage
 ## Copyright (c) 2023 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
diff --git a/tests/examples.nim b/tests/examples.nim
index 9ef4e292..2b7e96d6 100644
--- a/tests/examples.nim
+++ b/tests/examples.nim
@@ -38,7 +38,7 @@ proc example*[T](_: type seq[T]): seq[T] =
   newSeqWith(length, T.example)
 
 proc example*(_: type UInt256): UInt256 =
-  UInt256.fromBytes(array[32, byte].example)
+  UInt256.fromBytesBE(array[32, byte].example)
 
 proc example*[T: distinct](_: type T): T =
   type baseType = T.distinctBase
diff --git a/tests/helpers.nim b/tests/helpers.nim
index bcac03c3..e938015f 100644
--- a/tests/helpers.nim
+++ b/tests/helpers.nim
@@ -9,12 +9,11 @@ import ./asynctest
 export multisetup, trackers, templeveldb
 
 ### taken from libp2p errorhelpers.nim
-proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
+proc allFuturesThrowing(futs: seq[FutureBase]): Future[void] =
   # This proc is only meant for use in tests / not suitable for general use.
   # - Swallowing errors arbitrarily instead of aggregating them is bad design
   # - It raises `CatchableError` instead of the union of the `futs` errors,
   #   inflating the caller's `raises` list unnecessarily. `macro` could fix it
-  let futs = @args
   (
     proc() {.async: (raises: [CatchableError]).} =
       await allFutures(futs)
@@ -30,6 +29,9 @@ proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
       raise firstErr
   )()
 
+proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
+  allFuturesThrowing(@args)
+
 proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
   allFuturesThrowing(futs.mapIt(FutureBase(it)))
 
diff --git a/tests/integration/5_minutes/testrestapivalidation.nim b/tests/integration/5_minutes/testrestapivalidation.nim
index d428402e..6530f355 100644
--- a/tests/integration/5_minutes/testrestapivalidation.nim
+++ b/tests/integration/5_minutes/testrestapivalidation.nim
@@ -246,7 +246,7 @@ multinodesuite "Rest API validation":
     let data = await RandomChunker.example(blocks = 2)
     let cid = (await client.upload(data)).get
     let duration = (31 * 24 * 60 * 60).uint64
-      # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056
+      # 31 days TODO: this should not be hardcoded, but waits for https://github.com/logos-storage/logos-storage-nim/issues/1056
     let proofProbability = 3.u256
     let expiry = 30.uint
     let collateralPerByte = 1.u256
diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim
index 67adfae7..16406c0b 100644
--- a/tests/integration/codexprocess.nim
+++ b/tests/integration/codexprocess.nim
@@ -43,7 +43,7 @@ method workingDir(node: CodexProcess): string =
   return currentSourcePath() / ".." / ".." / ".."
 
 method executable(node: CodexProcess): string =
-  return "build" / "codex"
+  return "build" / "storage"
 
 method startedOutput(node: CodexProcess): string =
   return "REST service started"
@@ -78,7 +78,7 @@ proc ethAccount*(node: CodexProcess): Address {.raises: [CodexProcessError].} =
 
 proc apiUrl*(node: CodexProcess): string {.raises: [CodexProcessError].} =
   let config = node.config
-  return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"
+  return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/storage/v1"
 
 proc logFile*(node: CodexProcess): ?string {.raises: [CodexProcessError].} =
   node.config.logFile
diff --git a/tests/integration/hardhatprocess.nim b/tests/integration/hardhatprocess.nim
index a9db3ea2..8342f05f 100644
--- a/tests/integration/hardhatprocess.nim
+++ b/tests/integration/hardhatprocess.nim
@@ -34,7 +34,8 @@ type HardhatProcessError* = object of NodeProcessError
 
 method workingDir(node: HardhatProcess): string =
-  return currentSourcePath() / ".." / ".." / ".." / "vendor" / "codex-contracts-eth"
+  return
+    currentSourcePath() / ".." / ".." / ".." / "vendor" / "logos-storage-contracts-eth"
 
 method executable(node: HardhatProcess): string =
   return
diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim
index 552e7c37..016794dd 100644
--- a/tests/integration/multinodes.nim
+++ b/tests/integration/multinodes.nim
@@ -68,8 +68,17 @@ template withLock(lock: AsyncLock, body: untyped) =
   except AsyncLockError as parent:
     raiseMultiNodeSuiteError "lock error", parent
 
-template multinodesuite*(suiteName: string, body: untyped) =
-  asyncchecksuite suiteName:
+proc sanitize(pathSegment: string): string =
+  var sanitized = pathSegment
+  for invalid in invalidFilenameChars.items:
+    sanitized = sanitized.replace(invalid, '_').replace(' ', '_')
+  sanitized
+
+proc getTempDirName*(starttime: string, role: Role, roleIdx: int): string =
+  getTempDir() / "Storage" / sanitize($starttime) / sanitize($role & "_" & $roleIdx)
+
+template multinodesuite*(name: string, body: untyped) =
+  asyncchecksuite name:
     # Following the problem described here:
     # https://github.com/NomicFoundation/hardhat/issues/2053
     # It may be desirable to use http RPC provider.
@@ -268,15 +277,15 @@ template multinodesuite*(suiteName: string, body: untyped) =
       )
       config.addCliOption(
         PersistenceCmd.prover, "--circom-r1cs",
-        "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+        "vendor/logos-storage-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
       )
       config.addCliOption(
         PersistenceCmd.prover, "--circom-wasm",
-        "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+        "vendor/logos-storage-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
       )
       config.addCliOption(
         PersistenceCmd.prover, "--circom-zkey",
-        "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
+        "vendor/logos-storage-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
       )
 
     return await newCodexProcess(providerIdx, config, Role.Provider)
diff --git a/tests/integration/nodeprocess.nim b/tests/integration/nodeprocess.nim
index 827a4b0f..07d4a9af 100644
--- a/tests/integration/nodeprocess.nim
+++ b/tests/integration/nodeprocess.nim
@@ -58,7 +58,7 @@ method start*(node: NodeProcess) {.base, async: (raises: [CancelledError]).} =
 
   try:
     if node.debug:
-      echo "starting codex node with args: ", node.arguments.join(" ")
+      echo "starting Storage node with args: ", node.arguments.join(" ")
     node.process = await startProcess(
       node.executable,
       node.workingDir,
@@ -105,7 +105,7 @@ proc captureOutput(
 proc startNode*[T: NodeProcess](
     _: type T, args: seq[string], debug: string | bool = false, name: string
 ): Future[T] {.async: (raises: [CancelledError]).} =
-  ## Starts a Codex Node with the specified arguments.
+  ## Starts a Logos Storage Node with the specified arguments.
   ## Set debug to 'true' to see output of the node.
   let node = T(
     arguments: @args,
diff --git a/tools/cirdl/cirdl.nim b/tools/cirdl/cirdl.nim
index 19a94de4..5b363fc3 100644
--- a/tools/cirdl/cirdl.nim
+++ b/tools/cirdl/cirdl.nim
@@ -28,7 +28,7 @@ proc printHelp() =
   info "Usage: ./cirdl [circuitPath] [rpcEndpoint] ([marketplaceAddress])"
   info "  circuitPath: path where circuit files will be placed."
   info "  rpcEndpoint: URL of web3 RPC endpoint."
-  info "  marketplaceAddress: Address of deployed Codex marketplace contracts. If left out, will auto-discover based on connected network."
+  info "  marketplaceAddress: Address of deployed Storage marketplace contracts. If left out, will auto-discover based on connected network."
 
 proc getMarketplaceAddress(
     provider: JsonRpcProvider, mpAddressOverride: ?Address
@@ -83,7 +83,7 @@ proc copyFiles(unpackDir: string, circuitPath: string): ?!void =
   success()
 
 proc main() {.async.} =
-  info "Codex Circuit Downloader, Aww yeah!"
+  info "Storage Circuit Downloader, Aww yeah!"
   let args = os.commandLineParams()
   if args.len < 2 or args.len > 3:
     printHelp()
@@ -135,7 +135,7 @@ proc main() {.async.} =
 
   # Unpack library cannot unpack into existing directory. We also cannot
   # delete the target directory and have the library recreate it because
-  # Codex has likely created it and set correct permissions.
+  # Logos Storage has likely created it and set correct permissions.
   # So, we unpack to a temp folder and move the files.
   if err =? copyFiles(unpackFolder, circuitPath).errorOption:
     error "Failed to copy files", msg = err.msg
diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth
deleted file mode 160000
index a179deb2..00000000
--- a/vendor/codex-contracts-eth
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit a179deb2f99fa74ac7b0e568ec6d05f1d9a7a407
diff --git a/vendor/codex-storage-proofs-circuits b/vendor/codex-storage-proofs-circuits
deleted file mode 160000
index ac8d3667..00000000
--- a/vendor/codex-storage-proofs-circuits
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit ac8d3667526862458b162bee71dd5dcf6170c209
diff --git a/vendor/logos-storage-contracts-eth b/vendor/logos-storage-contracts-eth
new file mode 160000
index 00000000..e159ceee
--- /dev/null
+++ b/vendor/logos-storage-contracts-eth
@@ -0,0 +1 @@
+Subproject commit e159ceee44e91f216533a16e9f14cfd91dee0e02
diff --git a/vendor/logos-storage-nim-dht b/vendor/logos-storage-nim-dht
new file mode 160000
index 00000000..99884b59
--- /dev/null
+++ b/vendor/logos-storage-nim-dht
@@ -0,0 +1 @@
+Subproject commit 99884b5971759a0da437db3d2e834b92a058527d
diff --git a/vendor/logos-storage-proofs-circuits b/vendor/logos-storage-proofs-circuits
new file mode 160000
index 00000000..82de3564
--- /dev/null
+++ b/vendor/logos-storage-proofs-circuits
@@ -0,0 +1 @@
+Subproject commit 82de35640d2b05557143354c9a0f28ff81cd5fec
diff --git a/vendor/lrucache.nim b/vendor/lrucache.nim
index 8767ade0..ba577369 160000
--- a/vendor/lrucache.nim
+++ b/vendor/lrucache.nim
@@ -1 +1 @@
-Subproject commit 8767ade0b76ea5b5d4ce24a52d0c58a6ebeb66cd
+Subproject commit ba57736921b2972163b673fc706e7659e7c5cbd6
diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl
index 667b4044..f08d7220 160000
--- a/vendor/nim-bearssl
+++ b/vendor/nim-bearssl
@@ -1 +1 @@
-Subproject commit 667b40440a53a58e9f922e29e20818720c62d9ac
+Subproject commit f08d72203f9e110c099c6f393e1c0640fcbe176f
diff --git a/vendor/nim-blscurve b/vendor/nim-blscurve
index de2d3c79..f4d0de2e 160000
--- a/vendor/nim-blscurve
+++ b/vendor/nim-blscurve
@@ -1 +1 @@
-Subproject commit de2d3c79264bba18dbea469c8c5c4b3bb3c8bc55
+Subproject commit f4d0de2eece20380541fbf73d4b8bf57dc214b3b
diff --git a/vendor/nim-circom-compat b/vendor/nim-circom-compat
index d3fb9039..a5d6f766 160000
--- a/vendor/nim-circom-compat
+++ b/vendor/nim-circom-compat
@@ -1 +1 @@
-Subproject commit d3fb903945c3895f28a2e50685745e0a9762ece5
+Subproject commit a5d6f76654616ed981beb1104997f519ca79c7ed
diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht
deleted file mode 160000
index f6eef1ac..00000000
--- a/vendor/nim-codex-dht
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f6eef1ac95c70053b2518f1e3909c909ed8701a6
diff --git a/vendor/nim-contract-abi b/vendor/nim-contract-abi
index 842f4891..0a7b4cec 160000
--- a/vendor/nim-contract-abi
+++ b/vendor/nim-contract-abi
@@ -1 +1 @@
-Subproject commit 842f48910be4f388bcbf8abf1f02aba1d5e2ee64
+Subproject commit 0a7b4cecce725bcb11ad8648035a92704a8854d3
diff --git a/vendor/nim-eth b/vendor/nim-eth
index dcfbc429..d9135e6c 160000
--- a/vendor/nim-eth
+++ b/vendor/nim-eth
@@ -1 +1 @@
-Subproject commit dcfbc4291d39b59563828c3e32be4d51a2f25931
+Subproject commit d9135e6c3c5d6d819afdfb566aa8d958756b73a8
diff --git a/vendor/nim-ethers b/vendor/nim-ethers
index 30871c7b..965b8cd7 160000
--- a/vendor/nim-ethers
+++ b/vendor/nim-ethers
@@ -1 +1 @@
-Subproject commit 30871c7b1d5784e36c51223bd36ef6f1fffcc030
+Subproject commit 965b8cd752544df96b5effecbbd27a8f56a25d62
diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams
index cf8d4d22..ce27581a 160000
--- a/vendor/nim-faststreams
+++ b/vendor/nim-faststreams
@@ -1 +1 @@
-Subproject commit cf8d4d22636b8e514caf17e49f9c786ac56b0e85
+Subproject commit ce27581a3e881f782f482cb66dc5b07a02bd615e
diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils
index 8bb1acba..c53852d9 160000
--- a/vendor/nim-http-utils
+++ b/vendor/nim-http-utils
@@ -1 +1 @@
-Subproject commit 8bb1acbaa4b86eb866145b0d468eff64a57d1897
+Subproject commit c53852d9e24205b6363bba517fa8ee7bde823691
diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc
index cbe8edf6..b6e40a77 160000
--- a/vendor/nim-json-rpc
+++ b/vendor/nim-json-rpc
@@ -1 +1 @@
-Subproject commit cbe8edf69d743a787b76b1cd25bfc4eae89927f7
+Subproject commit b6e40a776fa2d00b97a9366761fb7da18f31ae5c
diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization
index 6eadb6e9..a6dcf03e 160000
--- a/vendor/nim-json-serialization
+++ b/vendor/nim-json-serialization
@@ -1 +1 @@
-Subproject commit 6eadb6e939ffa7882ff5437033c11a9464d3385c
+Subproject commit a6dcf03e04e179127a5fcb7e495d19a821d56c17
diff --git a/vendor/nim-leopard b/vendor/nim-leopard
index 7506b90f..0478b12d 160000
--- a/vendor/nim-leopard
+++ b/vendor/nim-leopard
@@ -1 +1 @@
-Subproject commit 7506b90f9c650c02b96bf525d4fd1bd4942a495f
+Subproject commit 0478b12df90cbbe531efa69422cff67b5a3a5d93
diff --git a/vendor/nim-leveldbstatic b/vendor/nim-leveldbstatic
index 378ef63e..5a0cd8de 160000
--- a/vendor/nim-leveldbstatic
+++ b/vendor/nim-leveldbstatic
@@ -1 +1 @@
-Subproject commit 378ef63e261e3b5834a3567404edc3ce838498b3
+Subproject commit 5a0cd8de6b2363827c43cafd3ed346ecee427e1e
diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace
index 6da0cda8..99bc2ba1 160000
--- a/vendor/nim-libbacktrace
+++ b/vendor/nim-libbacktrace
@@ -1 +1 @@
-Subproject commit 6da0cda88ab7780bd5fd342327adb91ab84692aa
+Subproject commit 99bc2ba16bc2d44f9a97e706304f64744d913d7f
diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p
index 7eaf79fe..e82080f7 160000
--- a/vendor/nim-libp2p
+++ b/vendor/nim-libp2p
@@ -1 +1 @@
-Subproject commit 7eaf79fefe45b03a4281e3505d5fcc97b32df39c
+Subproject commit e82080f7b1aa61c6d35fa5311b873f41eff4bb52
diff --git a/vendor/nim-metrics b/vendor/nim-metrics
index cacfdc12..9b9afee9 160000
--- a/vendor/nim-metrics
+++ b/vendor/nim-metrics
@@ -1 +1 @@
-Subproject commit cacfdc12454a0804c65112b9f4f50d1375208dcd
+Subproject commit 9b9afee96357ad82dabf4563cf292f89b50423df
diff --git a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal
index 6508ce75..860e18c3 160000
--- a/vendor/nim-nat-traversal
+++ b/vendor/nim-nat-traversal
@@ -1 +1 @@
-Subproject commit 6508ce75060878dfcdfa21f94721672c69a1823b
+Subproject commit 860e18c37667b5dd005b94c63264560c35d88004
diff --git a/vendor/nim-ngtcp2 b/vendor/nim-ngtcp2
index 6834f475..791eb859 160000
--- a/vendor/nim-ngtcp2
+++ b/vendor/nim-ngtcp2
@@ -1 +1 @@
-Subproject commit 6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
+Subproject commit 791eb859145f9f268eb23eb9cbe777bdd7699c4d
diff --git a/vendor/nim-nitro b/vendor/nim-nitro
index e3719433..5ccdeb46 160000
--- a/vendor/nim-nitro
+++ b/vendor/nim-nitro
@@ -1 +1 @@
-Subproject commit e3719433d5ace25947c468787c805969642b3913
+Subproject commit 5ccdeb46e06dcf5cef80d0acbb80ee8a17d596e7
diff --git a/vendor/nim-presto b/vendor/nim-presto
index 92b1c7ff..62225bfa 160000
--- a/vendor/nim-presto
+++ b/vendor/nim-presto
@@ -1 +1 @@
-Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be
+Subproject commit 62225bfa7ce703a99e04680bfc3498e69b52897f
diff --git a/vendor/nim-protobuf-serialization b/vendor/nim-protobuf-serialization
index 5a31137a..4d74e157 160000
--- a/vendor/nim-protobuf-serialization
+++ b/vendor/nim-protobuf-serialization
@@ -1 +1 @@
-Subproject commit 5a31137a82c2b6a989c9ed979bb636c7a49f570e
+Subproject commit 4d74e157cdf1bdcd0ffd41519ebde740c4b80447
diff --git a/vendor/nim-quic b/vendor/nim-quic
index ddcb31ff..525842ae 160000
--- a/vendor/nim-quic
+++ b/vendor/nim-quic
@@ -1 +1 @@
-Subproject commit ddcb31ffb74b5460ab37fd13547eca90594248bc
+Subproject commit 525842aeca6111fd5035568d0f59aa2b338cc29d
diff --git a/vendor/nim-serde b/vendor/nim-serde
index 5ced7c88..649ae60e 160000
--- a/vendor/nim-serde
+++ b/vendor/nim-serde
@@ -1 +1 @@
-Subproject commit 5ced7c88b97d99c582285ce796957fb71fd42434
+Subproject commit 649ae60e05ec432738d41eb8d613c5d7f434c4a3
diff --git a/vendor/nim-serialization b/vendor/nim-serialization
index 2086c996..b0f2fa32 160000
--- a/vendor/nim-serialization
+++ b/vendor/nim-serialization
@@ -1 +1 @@
-Subproject commit 2086c99608b4bf472e1ef5fe063710f280243396
+Subproject commit b0f2fa32960ea532a184394b0f27be37bd80248b
diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi
index 05bbff1a..6797c318 160000
--- a/vendor/nim-sqlite3-abi
+++ b/vendor/nim-sqlite3-abi
@@ -1 +1 @@
-Subproject commit 05bbff1af4e8fe2d972ba4b0667b89ca94d3ebba
+Subproject commit 6797c31836bff377bf50f1ac7bf8122449bf99ba
diff --git a/vendor/nim-stew b/vendor/nim-stew
index a6e19813..b6616873 160000
--- a/vendor/nim-stew
+++ b/vendor/nim-stew
@@ -1 +1 @@
-Subproject commit a6e198132097fb544d04959aeb3b839e1408f942
+Subproject commit b66168735d6f3841c5239c3169d3fe5fe98b1257
diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools
index 66585e2e..97f76fae 160000
--- a/vendor/nim-taskpools
+++ b/vendor/nim-taskpools
@@ -1 +1 @@
-Subproject commit 66585e2e960b7695e48ea60377fb3aeac96406e8
+Subproject commit 97f76faef6ba64bc77d9808c27ec5e9917e7cfde
diff --git a/vendor/nim-testutils b/vendor/nim-testutils
index 4d37244f..e4d37dc1 160000
--- a/vendor/nim-testutils
+++ b/vendor/nim-testutils
@@ -1 +1 @@
-Subproject commit 4d37244f9f5e1acd8592a4ceb5c3fc47bc160181
+Subproject commit e4d37dc1652d5c63afb89907efb5a5e812261797
diff --git a/vendor/nim-toml-serialization b/vendor/nim-toml-serialization
index fea85b27..b5b387e6 160000
--- a/vendor/nim-toml-serialization
+++ b/vendor/nim-toml-serialization
@@ -1 +1 @@
-Subproject commit fea85b27f0badcf617033ca1bc05444b5fd8aa7a
+Subproject commit b5b387e6fb2a7cc75d54a269b07cc6218361bd46
diff --git a/vendor/nim-websock b/vendor/nim-websock
index ebe308a7..35ae76f1 160000
--- a/vendor/nim-websock
+++ b/vendor/nim-websock
@@ -1 +1 @@
-Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508
+Subproject commit 35ae76f1559e835c80f9c1a3943bf995d3dd9eb5
diff --git a/vendor/nim-zlib b/vendor/nim-zlib
index 91cf360b..c71efff5 160000
--- a/vendor/nim-zlib
+++ b/vendor/nim-zlib
@@ -1 +1 @@
-Subproject commit 91cf360b1aeb2e0c753ff8bac6de22a41c5ed8cd
+Subproject commit c71efff5fd1721362b3363dc7d0e2a4c0dbc6453
diff --git a/vendor/stint b/vendor/stint
index 5c5e01ce..470b7892 160000
--- a/vendor/stint
+++ b/vendor/stint
@@ -1 +1 @@
-Subproject commit 5c5e01cef089a261474b7abfe246b37447aaa8ed
+Subproject commit 470b7892561b5179ab20bd389a69217d6213fe58
diff --git a/vendor/upraises b/vendor/upraises
deleted file mode 160000
index bc262898..00000000
--- a/vendor/upraises
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit bc2628989b63854d980e92dadbd58f83e34b6f25