Merge branch 'master' into ci/fix-windows-ci

gmega 2026-01-15 17:16:45 -03:00
commit 39f7f4ffe0
225 changed files with 6705 additions and 1570 deletions

View File

@ -226,7 +226,7 @@ runs:
run: |
git config --global core.symlinks false
- name: Build Nim and Codex dependencies
- name: Build Nim and Logos Storage dependencies
shell: ${{ inputs.shell }} {0}
run: |
which gcc

View File

@ -70,7 +70,7 @@ runners busy for longer on a workflow that you know is going to fail anyway.
Subsequent runs will therefore take longer to start. Failing fast is most likely
better for overall development speed.
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache

View File

@ -54,9 +54,9 @@ jobs:
with:
node-version: 22
- name: Install Ethereum node dependencies
- name: Start Ethereum node with Logos Storage contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/codex-contracts-eth
working-directory: vendor/logos-storage-contracts-eth
env:
MSYS2_PATH_TYPE: inherit
run: |

View File

@ -1,175 +0,0 @@
name: Deploy - Devnet
on:
workflow_dispatch:
inputs:
codex_image:
description: codexstorage/nim-codex:latest-dist-tests
required: false
type: string
workflow_call:
inputs:
codex_image:
description: codexstorage/nim-codex:latest-dist-tests
required: true
type: string
env:
CODEX_NAMESPACE: codex
TOOLS_NAMESPACE: common
KUBE_CONFIG: ${{ secrets.DEVNET_KUBE_CONFIG }}
KUBE_VERSION: v1.33.1
CODEX_IMAGE: ${{ inputs.codex_image }}
SSH_HOSTS: ${{ secrets.DEVNET_SSH_HOSTS }}
SSH_PORT: ${{ secrets.DEVNET_SSH_PORT }}
SSH_USERNAME: ${{ secrets.DEVNET_SSH_USERNAME }}
SSH_PRIVATE_KEY: ${{ secrets.DEVNET_SSH_KEY }}
jobs:
deploy-contracts:
name: Deploy contracts
runs-on: ubuntu-latest
steps:
- name: Create access token
uses: actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ secrets.DEPLOYER_APP_ID }}
private-key: ${{ secrets.DEPLOYER_PRIVATE_KEY }}
repositories: codex-contracts-eth
- name: Checkout sources
uses: actions/checkout@v4
with:
submodules: recursive
- name: Get contracts submodule ref
id: contracts
run: echo "ref=$(git rev-parse HEAD:vendor/codex-contracts-eth)" >> $GITHUB_OUTPUT
- name: Deploy smart contracts
uses: the-actions-org/workflow-dispatch@v4
with:
repo: codex-storage/codex-contracts-eth
workflow: devnet-contracts.yml
token: ${{ steps.app-token.outputs.token }}
wait-for-completion-timeout: 20m
wait-for-completion-interval: 20s
inputs: '{ "network": "codex_devnet", "contracts_ref": "${{ steps.contracts.outputs.ref }}" }'
bootstrap-nodes:
name: Bootstrap nodes
runs-on: ubuntu-latest
needs: deploy-contracts
steps:
- name: Codex Bootstrap - Update
uses: appleboy/ssh-action@v1
with:
host: ${{ secrets.DEVNET_SSH_HOSTS }}
username: ${{ secrets.DEVNET_SSH_USERNAME }}
key: ${{ secrets.DEVNET_SSH_KEY }}
port: ${{ secrets.DEVNET_SSH_PORT }}
script: /opt/codex/remote-deploy.sh ${{ env.CODEX_IMAGE }}
cluster-nodes:
name: Cluster nodes
runs-on: ubuntu-latest
needs: bootstrap-nodes
steps:
- name: Kubectl - Install ${{ env.KUBE_VERSION }}
uses: azure/setup-kubectl@v4
with:
version: ${{ env.KUBE_VERSION }}
- name: Kubectl - Kubeconfig
run: |
mkdir -p "${HOME}"/.kube
echo "${{ env.KUBE_CONFIG }}" | base64 -d > "${HOME}"/.kube/config
- name: Codex Storage - Update
run: |
for node in {1..5}; do
kubectl -n "${{ env.CODEX_NAMESPACE }}" patch statefulset codex-storage-${node} \
--patch '{"spec": {"template": {"spec":{"containers":[{"name": "codex", "image":"${{ env.CODEX_IMAGE }}"}]}}}}'
done
- name: Codex Validators - Update
run: |
for node in {1..1}; do
kubectl -n "${{ env.CODEX_NAMESPACE }}" patch statefulset codex-validator-${node} \
--patch '{"spec": {"template": {"spec":{"containers":[{"name": "codex", "image":"${{ env.CODEX_IMAGE }}"}]}}}}'
done
- name: Codex Storage - Status
run: |
WAIT=300
SECONDS=0
sleep=1
for instance in {1..5}; do
while (( SECONDS < WAIT )); do
pod=codex-storage-${instance}-1
phase=$(kubectl get pod "${pod}" -n "${{ env.CODEX_NAMESPACE }}" -o jsonpath='{.status.phase}')
if [[ "${phase}" == "Running" ]]; then
echo "Pod ${pod} is in the ${phase} state"
break
else
echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))"
fi
sleep "${sleep}"
done
done
- name: Codex Validators - Status
run: |
WAIT=300
SECONDS=0
sleep=1
for instance in {1..1}; do
while (( SECONDS < WAIT )); do
pod=codex-validator-${instance}-1
phase=$(kubectl get pod "${pod}" -n "${{ env.CODEX_NAMESPACE }}" -o jsonpath='{.status.phase}')
if [[ "${phase}" == "Running" ]]; then
echo "Pod ${pod} is in the ${phase} state"
break
else
echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))"
fi
sleep "${sleep}"
done
done
- name: Tools - Update
run: |
crawler_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app.kubernetes.io/name=crawler' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
discordbot_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app=discordbot' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
for pod in "${crawler_pod}" "${discordbot_pod}"; do
if [[ -n "${pod}" ]]; then
kubectl delete pod -n "${{ env.TOOLS_NAMESPACE }}" "${pod}" --grace-period=10
fi
done
- name: Tools - Status
run: |
WAIT=300
SECONDS=0
sleep=1
crawler_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app.kubernetes.io/name=crawler' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
discordbot_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app=discordbot' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
for pod in "${crawler_pod}" "${discordbot_pod}"; do
if [[ -n "${pod}" ]]; then
while (( SECONDS < WAIT )); do
phase=$(kubectl get pod "${pod}" -n "${{ env.TOOLS_NAMESPACE }}" -o jsonpath='{.status.phase}')
if [[ "${phase}" == "Running" ]]; then
echo "Pod ${pod} is in the ${phase} state"
break
else
echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))"
fi
sleep "${sleep}"
done
fi
done

View File

@ -1,71 +0,0 @@
name: Docker - Dist-Tests
on:
push:
branches:
- master
tags:
- 'v*.*.*'
paths-ignore:
- '**/*.md'
- '.gitignore'
- '.github/**'
- '!.github/workflows/docker-dist-tests.yml'
- '!.github/workflows/docker-reusable.yml'
- '!.github/workflows/deploy-devnet.yml'
- 'docker/**'
- '!docker/codex.Dockerfile'
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
inputs:
run_release_tests:
description: Run Release tests
required: false
type: boolean
default: false
deploy_devnet:
description: Deploy Devnet
required: false
type: boolean
default: false
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
nat_ip_auto: true
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_suffix: dist-tests
tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
run_release_tests: ${{ inputs.run_release_tests }}
secrets: inherit
deploy-devnet:
name: Deploy Devnet
uses: ./.github/workflows/deploy-devnet.yml
needs: build-and-push
if: ${{ inputs.deploy_devnet || github.event_name == 'push' && github.ref_name == github.event.repository.default_branch }}
with:
codex_image: ${{ needs.build-and-push.outputs.codex_image }}
secrets: inherit

View File

@ -70,7 +70,7 @@ on:
type: string
outputs:
codex_image:
description: Codex Docker image tag
description: Logos Storage Docker image tag
value: ${{ jobs.publish.outputs.codex_image }}
@ -87,7 +87,7 @@ env:
TAG_SUFFIX: ${{ inputs.tag_suffix }}
CONTRACT_IMAGE: ${{ inputs.contract_image }}
# Tests
TESTS_SOURCE: codex-storage/cs-codex-dist-tests
TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
TESTS_BRANCH: master
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@ -316,7 +316,7 @@ jobs:
max-parallel: 1
matrix:
tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}
@ -333,7 +333,7 @@ jobs:
name: Run Release Tests
needs: [compute-tests-inputs]
if: ${{ inputs.run_release_tests == 'true' }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}

View File

@ -31,7 +31,7 @@ jobs:
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push

View File

@ -52,7 +52,7 @@ jobs:
node-version: 18
- name: Build OpenAPI
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API"
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
- name: Build Postman Collection
run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false

View File

@ -12,7 +12,7 @@ env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned
rust_version: 1.79.0
codex_binary_base: codex
storage_binary_base: storage
cirdl_binary_base: cirdl
build_dir: build
nim_flags: ''
@ -32,7 +32,6 @@ jobs:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
@ -74,18 +73,18 @@ jobs:
windows*) os_name="windows" ;;
esac
github_ref_name="${GITHUB_REF_NAME/\//-}"
codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
if [[ ${os_name} == "windows" ]]; then
codex_binary="${codex_binary}.exe"
storage_binary="${storage_binary}.exe"
cirdl_binary="${cirdl_binary}.exe"
fi
echo "codex_binary=${codex_binary}" >>$GITHUB_ENV
echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
- name: Release - Build
run: |
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}"
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
- name: Release - Libraries
@ -96,11 +95,11 @@ jobs:
done
fi
- name: Release - Upload codex build artifacts
- name: Release - Upload Logos Storage build artifacts
uses: actions/upload-artifact@v4
with:
name: release-${{ env.codex_binary }}
path: ${{ env.build_dir }}/${{ env.codex_binary_base }}*
name: release-${{ env.storage_binary }}
path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
retention-days: 30
- name: Release - Upload cirdl build artifacts
@ -140,7 +139,7 @@ jobs:
}
# Compress and prepare
for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do
for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only
@ -189,6 +188,7 @@ jobs:
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
echo "${branch}" > "${folder}"/latest
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
rm -f "${folder}"/latest
# master branch
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
@ -212,6 +212,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.DISPATCH_PAT }}
repository: codex-storage/py-codex-api-client
repository: logos-storage/logos-storage-py-api-client
event-type: generate
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}'
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'

.gitmodules (vendored, 49 lines changed)
View File

@ -37,22 +37,17 @@
path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/questionable"]
path = vendor/questionable
url = https://github.com/status-im/questionable.git
ignore = untracked
branch = master
[submodule "vendor/upraises"]
path = vendor/upraises
url = https://github.com/markspanbroek/upraises.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/asynctest"]
path = vendor/asynctest
url = https://github.com/status-im/asynctest.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-presto"]
path = vendor/nim-presto
url = https://github.com/status-im/nim-presto.git
@ -132,7 +127,7 @@
path = vendor/nim-websock
url = https://github.com/status-im/nim-websock.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi
@ -160,13 +155,13 @@
path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git
ignore = untracked
branch = master
branch = stable
[submodule "vendor/nim-leopard"]
path = vendor/nim-leopard
url = https://github.com/status-im/nim-leopard.git
[submodule "vendor/nim-codex-dht"]
path = vendor/nim-codex-dht
url = https://github.com/codex-storage/nim-codex-dht.git
[submodule "vendor/logos-storage-nim-dht"]
path = vendor/logos-storage-nim-dht
url = https://github.com/logos-storage/logos-storage-nim-dht.git
ignore = untracked
branch = master
[submodule "vendor/nim-datastore"]
@ -178,9 +173,11 @@
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = https://github.com/status-im/nim-eth
[submodule "vendor/codex-contracts-eth"]
path = vendor/codex-contracts-eth
url = https://github.com/status-im/codex-contracts-eth
[submodule "vendor/logos-storage-contracts-eth"]
path = vendor/logos-storage-contracts-eth
url = https://github.com/logos-storage/logos-storage-contracts-eth.git
ignore = untracked
branch = master
[submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization
@ -195,26 +192,28 @@
url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"]
path = vendor/nim-poseidon2
url = https://github.com/codex-storage/nim-poseidon2.git
url = https://github.com/logos-storage/nim-poseidon2.git
ignore = untracked
branch = master
[submodule "vendor/constantine"]
path = vendor/constantine
url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"]
path = vendor/nim-circom-compat
url = https://github.com/codex-storage/nim-circom-compat.git
url = https://github.com/logos-storage/nim-circom-compat.git
ignore = untracked
branch = master
[submodule "vendor/codex-storage-proofs-circuits"]
path = vendor/codex-storage-proofs-circuits
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
[submodule "vendor/logos-storage-proofs-circuits"]
path = vendor/logos-storage-proofs-circuits
url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
ignore = untracked
branch = master
[submodule "vendor/nim-serde"]
path = vendor/nim-serde
url = https://github.com/codex-storage/nim-serde.git
url = https://github.com/logos-storage/nim-serde.git
[submodule "vendor/nim-leveldbstatic"]
path = vendor/nim-leveldbstatic
url = https://github.com/codex-storage/nim-leveldb.git
url = https://github.com/logos-storage/nim-leveldb.git
[submodule "vendor/nim-zippy"]
path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git
@ -225,9 +224,9 @@
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = master
branch = main

View File

@ -93,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.'
# Builds the codex binary
# Builds the Logos Storage binary
all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
@ -246,6 +246,7 @@ format:
$(NPH) *.nim
$(NPH) codex/
$(NPH) tests/
$(NPH) library/
clean-nph:
rm -f $(NPH)
@ -256,4 +257,32 @@ print-nph-path:
clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
endif
endif # "variables.mk" was not included

View File

@ -1,22 +1,22 @@
# Codex Decentralized Durability Engine
# Logos Storage Decentralized Engine
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval.
> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
[![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
For detailed instructions on preparing to build `logos-storage-nim`, see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
To build the project, clone it and run:
@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
Run the client with:
```bash
build/codex
build/storage
```
## Configuration
It is possible to configure a Codex node in several ways:
It is possible to configure a Logos Storage node in several ways:
1. CLI options
2. Environment variables
3. Configuration file
@ -45,21 +45,71 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
## Guides
To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
To get acquainted with Logos Storage, consider:
* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API
The client exposes a REST API that can be used to interact with it. An overview of the API can be found at [api.codex.storage](https://api.codex.storage).
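If you want to poke at the API from code, a minimal sketch in Go is shown below. The port (`8080`) and the `/api/codex/v1/debug/info` path are assumptions based on the current defaults and docs, so adjust them to your node's configuration.
```go
// rest_info.go - a minimal sketch of querying the node's REST API.
// The port (8080) and path (/api/codex/v1/debug/info) are assumptions;
// check your node's configuration and the API docs.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:8080/api/codex/v1/debug/info")
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response failed: %v", err)
	}
	fmt.Println(string(body)) // JSON with node/debug information
}
```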
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
Currently, only a Go binding is included.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
### Run the Go example
Build the Go example:
```bash
go build -o storage-go examples/golang/storage.go
```
Export the library path:
```bash
export LD_LIBRARY_PATH=build
```
Run the example:
```bash
./storage-go
```
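For orientation, the sketch below shows the usual cgo shape such an example takes: the `#cgo` directives point the linker at the library built above, and the program calls an exported function. The `storage_version` symbol and the library path are hypothetical placeholders; consult `library/` and `examples/golang/storage.go` for the real exported API.
```go
// storage_cgo.go - a cgo sketch, not the actual example.
package main

/*
#cgo LDFLAGS: -L${SRCDIR}/build -lstorage
// Hypothetical exported symbol; see library/ for the real C API.
char* storage_version(void);
*/
import "C"

import "fmt"

func main() {
	// Calls the (hypothetical) exported function from libstorage.
	fmt.Println("library version:", C.GoString(C.storage_version()))
}
```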
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so` on Linux, `libstorage.dylib` on macOS, `libstorage.dll` on Windows), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
### Limitation
Callbacks must be fast and non-blocking; otherwise, the library's worker thread will hang and prevent other requests from being processed.
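A sketch of the recommended pattern is below: the callback itself only hands data off to a buffered channel, and a separate goroutine does the slow work. The callback signature here is illustrative, not the binding's actual one.
```go
// callback_pattern.go - keeping library callbacks fast and non-blocking.
package main

import "fmt"

// events buffers notifications so the library's worker thread never waits.
var events = make(chan string, 1024)

// onEvent stands in for a callback registered with the library.
// It must return quickly: enqueue the data and get out.
func onEvent(msg string) {
	select {
	case events <- msg: // fast, non-blocking hand-off
	default: // queue full: drop (or count) rather than block the worker
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		// The slow work (parsing, I/O, ...) happens off the callback path.
		for msg := range events {
			fmt.Println("processing:", msg)
		}
		close(done)
	}()

	onEvent("example event") // simulate the library invoking the callback
	close(events)
	<-done
}
```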
## Contributing and development
Feel free to dive in; contributions are welcome! Open an issue or submit PRs.
### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) to format its code, and contributions are required to adhere to its styling.
If you are setting up a fresh environment, run `make build-nph` to get `nph`.
To format files, run `make nph/<file/folder you want to format>`.
If you want, you can install a Git pre-commit hook using `make install-nph-commit`, which will format modified files prior to committing them.

View File

@ -10,17 +10,17 @@ nim c -r run_benchmarks
```
By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definition.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit include paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI
## Logos Storage Ark Circom CLI
Runs Codex's prover setup with Ark / Circom.
Runs Logos Storage's prover setup with Ark / Circom.
Compile:
```sh

View File

@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir()
result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli"
result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
@ -118,7 +118,7 @@ proc createCircuit*(
##
## All needed circuit files will be generated as needed.
## They will be located in `circBenchDir` which defaults to a folder like:
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## with all the given CircuitArgs.
##
let circdir = circBenchDir

View File

@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
)
benchRuns[benchmarkName] = (runs.avg(), count)
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
if printRegular:
echo ""
for k, v in benchRuns:
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
if printTsv:
echo ""
echo "name", "\t", "avgTimeSec", "\t", "count"
for k, v in benchRuns:
echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math
func floorLog2*(x: int): int =

View File

@ -8,7 +8,13 @@ proc truthy(val: string): bool =
const truthySwitches = @["yes", "1", "on", "true"]
return val in truthySwitches
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
proc buildBinary(
srcName: string,
outName = os.lastPathPart(srcName),
srcDir = "./",
params = "",
lang = "c",
) =
if not dirExists "build":
mkDir "build"
@ -23,32 +29,56 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
let
# Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim"
srcName & ".nim"
exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, srcDir, params
exec "build/" & name
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
if not dirExists "build":
mkDir "build"
task codex, "build codex binary":
if `type` == "dynamic":
let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task storage, "build Logos Storage binary":
buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl"
task testCodex, "Build & run Codex tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true"
task testStorage, "Build & run Logos Storage tests":
test "testCodex",
outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
task testContracts, "Build & run Codex Contract tests":
task testContracts, "Build & run Logos Storage Contract tests":
test "testContracts"
task testIntegration, "Run integration tests":
buildBinary "codex",
outName = "storage",
params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:chronicles_disabled_topics=JSONRPC-HTTP-CLIENT,websock,libp2p,discv5 -d:codex_enable_proof_failures=true"
var sinks = @["textlines[nocolors,file]"]
@ -63,24 +93,24 @@ task testIntegration, "Run integration tests":
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary":
codexTask()
task build, "build Logos Storage binary":
storageTask()
task test, "Run tests":
testCodexTask()
testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask()
testStorageTask()
testContractsTask()
testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
codexTask()
storageTask()
test "testTaiko"
import strutils
@ -113,7 +143,7 @@ task coverage, "generates code coverage report":
test "coverage",
srcDir = "tests/",
params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
" --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c")
rmDir("coverage")
mkDir("coverage")
@ -126,10 +156,32 @@ task coverage, "generates code coverage report":
nimSrcs
)
echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ")
exec(
"genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report "
)
echo " ======== Coverage report Done ======== "
task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "":
exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

ci/linux.Jenkinsfile (new file, 51 lines)
View File

@ -0,0 +1,51 @@
#!/usr/bin/env groovy
library 'status-jenkins-lib@v1.9.37'
pipeline {
agent {
docker {
label 'linuxcontainer'
image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
args '--volume=/nix:/nix ' +
'--volume=/etc/nix:/etc/nix '
}
}
options {
timestamps()
ansiColor('xterm')
timeout(time: 20, unit: 'MINUTES')
disableConcurrentBuilds()
disableRestartFromStage()
/* manage how many builds we keep */
buildDiscarder(logRotator(
numToKeepStr: '20',
daysToKeepStr: '30',
))
}
stages {
stage('Build') {
steps {
script {
nix.flake("default")
}
}
}
stage('Check') {
steps {
script {
sh './result/bin/storage --version'
}
}
}
}
post {
cleanup {
cleanWs()
dir(env.WORKSPACE_TMP) { deleteDir() }
}
}
}

View File

@ -1,11 +1,15 @@
#!/usr/bin/env groovy
library 'status-jenkins-lib@v1.9.13'
library 'status-jenkins-lib@v1.9.37'
pipeline {
agent { label 'linux && x86_64 && nix-2.24' }
agent { label 'macos && aarch64 && nix' }
options {
timestamps()
ansiColor('xterm')
timeout(time: 20, unit: 'MINUTES')
disableConcurrentBuilds()
disableRestartFromStage()
/* manage how many builds we keep */
buildDiscarder(logRotator(
numToKeepStr: '20',
@ -25,13 +29,16 @@ pipeline {
stage('Check') {
steps {
script {
sh './result/bin/codex --version'
sh './result/bin/storage --version'
}
}
}
}
post {
cleanup { cleanWs() }
cleanup {
cleanWs()
dir(env.WORKSPACE_TMP) { deleteDir() }
}
}
}

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -45,7 +45,7 @@ when isMainModule:
let config = CodexConf.load(
version = codexFullVersion,
envVarsPrefix = "codex",
envVarsPrefix = "storage",
secondarySources = proc(
config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} =
@ -54,6 +54,16 @@ when isMainModule:
,
)
config.setupLogging()
try:
updateLogLevel(config.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
config.setupMetrics()
if not (checkAndCreateDataDir((config.dataDir).string)):
@ -89,15 +99,15 @@ when isMainModule:
try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure
## Ctrl+C handling
proc doShutdown() =
shutdown = server.stop()
shutdown = server.shutdown()
state = CodexStatus.Stopping
notice "Stopping Codex"
notice "Stopping Logos Storage"
proc controlCHandler() {.noconv.} =
when defined(windows):
@ -128,7 +138,7 @@ when isMainModule:
try:
waitFor server.start()
except CatchableError as error:
error "Codex failed to start", error = error.msg
error "Logos Storage failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they
@ -149,7 +159,7 @@ when isMainModule:
# be assigned before state switches to Stopping
waitFor shutdown
except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg
error "Logos Storage didn't shutdown correctly", error = error.msg
quit QuitFailure
notice "Exited codex"
notice "Exited Storage"

View File

@ -1,5 +1,5 @@
version = "0.1.0"
author = "Codex Team"
author = "Logos Storage Team"
description = "p2p data durability engine"
license = "MIT"
binDir = "build"

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -8,6 +8,7 @@
## those terms.
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/libp2p/cid
@ -38,6 +39,7 @@ const
DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds
type DiscoveryEngine* = ref object of RootObj
@ -51,11 +53,32 @@ type DiscoveryEngine* = ref object of RootObj
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block
minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try:
while b.discEngineRunning:
@ -78,8 +101,16 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
trace "Discovery request already in progress", cid
continue
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
@ -156,6 +187,7 @@ proc new*(
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
##
@ -171,4 +203,5 @@ proc new*(
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,12 +12,14 @@ import std/sets
import std/options
import std/algorithm
import std/sugar
import std/random
import pkg/chronos
import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics
import pkg/stint
import pkg/questionable
import pkg/stew/shims/sets
import ../../rng
import ../../stores/blockstore
@ -63,30 +65,59 @@ declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sen
declareCounter(
codex_block_exchange_blocks_received, "codex blockexchange blocks received"
)
declareCounter(
codex_block_exchange_spurious_blocks_received,
"codex blockexchange unrequested/duplicate blocks received",
)
declareCounter(
codex_block_exchange_discovery_requests_total,
"Total number of peer discovery requests sent",
)
declareCounter(
codex_block_exchange_peer_timeouts_total, "Total number of peer activity timeouts"
)
declareCounter(
codex_block_exchange_requests_failed_total,
"Total number of block requests that failed after exhausting retries",
)
const
DefaultMaxPeersPerRequest* = 10
# The default max message length of nim-libp2p is 100 megabytes, meaning we can
# in principle fit up to 1600 64k blocks per message, so 20 is well under
# that number.
DefaultMaxBlocksPerMessage = 20
DefaultTaskQueueSize = 100
DefaultConcurrentTasks = 10
# Don't do more than one discovery request per `DiscoveryRateLimit` seconds.
DiscoveryRateLimit = 3.seconds
DefaultPeerActivityTimeout = 1.minutes
# Match MaxWantListBatchSize to efficiently respond to incoming WantLists
PresenceBatchSize = MaxWantListBatchSize
CleanupBatchSize = 2048
type
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
PeerSelector* =
proc(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx {.gcsafe, raises: [].}
BlockExcEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
network*: BlockExcNetwork # Petwork interface
network*: BlockExcNetwork # Network interface
peers*: PeerCtxStore # Peers we're currently actively exchanging with
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
# Peers we're currently processing tasks for
selectPeer*: PeerSelector # Strategy for picking which peer to request a block from
concurrentTasks: int # Number of concurrent peers we're serving at any given time
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running
maxBlocksPerMessage: int
# Maximum number of blocks we can squeeze in a single message
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
discovery*: DiscoveryEngine
advertiser*: Advertiser
lastDiscRequest: Moment # time of last discovery request
Pricing* = object
address*: EthAddress
@ -104,7 +135,6 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task
##
await self.discovery.start()
await self.advertiser.start()
@ -154,8 +184,145 @@ proc sendWantBlock(
) # we want this remote to send us a block
codex_block_exchange_want_block_lists_sent.inc()
proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
Rng.instance.sample(peers)
proc sendBatchedWantList(
self: BlockExcEngine,
peer: BlockExcPeerCtx,
addresses: seq[BlockAddress],
full: bool,
) {.async: (raises: [CancelledError]).} =
var offset = 0
while offset < addresses.len:
let batchEnd = min(offset + MaxWantListBatchSize, addresses.len)
let batch = addresses[offset ..< batchEnd]
trace "Sending want list batch",
peer = peer.id,
batchSize = batch.len,
offset = offset,
total = addresses.len,
full = full
await self.network.request.sendWantList(
peer.id, batch, full = (full and offset == 0)
)
for address in batch:
peer.lastSentWants.incl(address)
offset = batchEnd
proc refreshBlockKnowledge(
self: BlockExcEngine, peer: BlockExcPeerCtx, skipDelta = false, resetBackoff = false
) {.async: (raises: [CancelledError]).} =
if peer.lastSentWants.len > 0:
var toRemove: seq[BlockAddress]
for address in peer.lastSentWants:
if address notin self.pendingBlocks:
toRemove.add(address)
if toRemove.len >= CleanupBatchSize:
await idleAsync()
break
for address in toRemove:
peer.lastSentWants.excl(address)
if self.pendingBlocks.wantListLen == 0:
if peer.lastSentWants.len > 0:
trace "Clearing want list tracking, no pending blocks", peer = peer.id
peer.lastSentWants.clear()
return
# We ask only for blocks that the peer hasn't already told us it has.
let
peerHave = peer.peerHave
toAsk = toHashSet(self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave))
if toAsk.len == 0:
if peer.lastSentWants.len > 0:
trace "Clearing want list tracking, peer has all blocks", peer = peer.id
peer.lastSentWants.clear()
return
let newWants = toAsk - peer.lastSentWants
if peer.lastSentWants.len > 0 and not skipDelta:
if newWants.len > 0:
trace "Sending delta want list update",
peer = peer.id, newWants = newWants.len, totalWants = toAsk.len
await self.sendBatchedWantList(peer, newWants.toSeq, full = false)
if resetBackoff:
peer.wantsUpdated
else:
trace "No changes in want list, skipping send", peer = peer.id
peer.lastSentWants = toAsk
else:
trace "Sending full want list", peer = peer.id, length = toAsk.len
await self.sendBatchedWantList(peer, toAsk.toSeq, full = true)
if resetBackoff:
peer.wantsUpdated
proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledError]).} =
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for peer in self.peers.peers.values.toSeq:
# We refresh block knowledge if:
# 1. the peer hasn't been refreshed in a while;
# 2. the list of blocks we care about has changed.
#
# Note that because of (2), it is important that we update our
# want list in the coarsest way possible instead of over many
# small updates.
#
# In dynamic swarms, staleness will dominate latency.
let
hasNewBlocks = peer.lastRefresh < self.pendingBlocks.lastInclusion
isKnowledgeStale = peer.isKnowledgeStale
if isKnowledgeStale or hasNewBlocks:
if not peer.refreshInProgress:
peer.refreshRequested()
await self.refreshBlockKnowledge(
peer, skipDelta = isKnowledgeStale, resetBackoff = hasNewBlocks
)
else:
trace "Not refreshing: peer is up to date", peer = peer.id
if (Moment.now() - lastIdle) >= runtimeQuota:
try:
await idleAsync()
except CancelledError:
discard
lastIdle = Moment.now()
proc searchForNewPeers(self: BlockExcEngine, cid: Cid) =
if self.lastDiscRequest + DiscoveryRateLimit < Moment.now():
trace "Searching for new peers for", cid = cid
codex_block_exchange_discovery_requests_total.inc()
self.lastDiscRequest = Moment.now() # always refresh before calling await!
self.discovery.queueFindBlocksReq(@[cid])
else:
trace "Not searching for new peers, rate limit not expired", cid = cid
proc evictPeer(self: BlockExcEngine, peer: PeerId) =
## Cleanup disconnected peer
##
trace "Evicting disconnected/departed peer", peer
let peerCtx = self.peers.get(peer)
if not peerCtx.isNil:
for address in peerCtx.blocksRequested:
self.pendingBlocks.clearRequest(address, peer.some)
# drop the peer from the peers table
self.peers.remove(peer)
proc downloadInternal(
self: BlockExcEngine, address: BlockAddress
@ -173,41 +340,147 @@ proc downloadInternal(
if self.pendingBlocks.retriesExhausted(address):
trace "Error retries exhausted"
codex_block_exchange_requests_failed_total.inc()
handle.fail(newException(RetriesExhaustedError, "Error retries exhausted"))
break
trace "Running retry handle"
let peers = self.peers.getPeersForBlock(address)
logScope:
peersWith = peers.with.len
peersWithout = peers.without.len
trace "Peers for block"
if peers.with.len > 0:
self.pendingBlocks.setInFlight(address, true)
await self.sendWantBlock(@[address], peers.with.randomPeer)
else:
self.pendingBlocks.setInFlight(address, false)
if peers.with.len == 0:
# We know of no peers that have the block.
if peers.without.len > 0:
await self.sendWantHave(@[address], peers.without)
self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
# If we have peers connected but none of them have the block, this
# could be because our knowledge about what they have has run stale.
# Tries to refresh it.
await self.refreshBlockKnowledge()
# Also tries to look for new peers for good measure.
# TODO: in the future, peer search and knowledge maintenance should
# be completely decoupled from one another. It is very hard to
# control what happens and how many neighbors we get like this.
self.searchForNewPeers(address.cidOrTreeCid)
await (handle or sleepAsync(self.pendingBlocks.retryInterval))
let nextDiscovery =
if self.lastDiscRequest + DiscoveryRateLimit > Moment.now():
(self.lastDiscRequest + DiscoveryRateLimit - Moment.now())
else:
0.milliseconds
let retryDelay =
max(secs(rand(self.pendingBlocks.retryInterval.secs)), nextDiscovery)
# We now wait for a bit and then retry. If the handle gets completed in the
# meantime (because the presence handler might have requested and received
# the block while we waited), we are done. Retry delays are randomized
# so we don't get all block loops spinning at the same time.
await handle or sleepAsync(retryDelay)
if handle.finished:
break
# Without decrementing the retry count, this would loop infinitely
# trying to find peers.
self.pendingBlocks.decRetries(address)
# If we still don't have the block, we'll go for another cycle.
trace "No peers for block, will retry shortly"
continue
# Once again, it might happen that the block was requested from a peer
# in the meantime. If so, we don't need to do anything. Otherwise,
# we'll be the ones placing the request.
let scheduledPeer =
if not self.pendingBlocks.isRequested(address):
let peer = self.selectPeer(peers.with)
discard self.pendingBlocks.markRequested(address, peer.id)
peer.blockRequestScheduled(address)
trace "Request block from block retry loop"
await self.sendWantBlock(@[address], peer)
peer
else:
let peerId = self.pendingBlocks.getRequestPeer(address).get()
self.peers.get(peerId)
if scheduledPeer.isNil:
trace "Scheduled peer no longer available, clearing stale request", address
self.pendingBlocks.clearRequest(address)
continue
# Parks until either the block is received, or the peer times out.
let activityTimer = scheduledPeer.activityTimer()
await handle or activityTimer # TODO: or peerDropped
activityTimer.cancel()
# XXX: we should probably not have this. Blocks should be retried
# to infinity unless cancelled by the client.
self.pendingBlocks.decRetries(address)
if handle.finished:
trace "Handle for block finished", failed = handle.failed
break
else:
# If the peer timed out, retries immediately.
trace "Peer timed out during block request", peer = scheduledPeer.id
codex_block_exchange_peer_timeouts_total.inc()
await self.network.dropPeer(scheduledPeer.id)
# Evicts peer immediately or we may end up picking it again in the
# next retry.
self.evictPeer(scheduledPeer.id)
except CancelledError as exc:
trace "Block download cancelled"
if not handle.finished:
await handle.cancelAndWait()
except RetriesExhaustedError as exc:
warn "Retries exhausted for block", address, exc = exc.msg
codex_block_exchange_requests_failed_total.inc()
if not handle.finished:
handle.fail(exc)
finally:
self.pendingBlocks.setInFlight(address, false)
self.pendingBlocks.clearRequest(address)
proc requestBlocks*(
self: BlockExcEngine, addresses: seq[BlockAddress]
): SafeAsyncIter[Block] =
var handles: seq[BlockHandle]
# Adds all blocks to pendingBlocks before calling the first downloadInternal. This will
# ensure that we don't send incomplete want lists.
for address in addresses:
if address notin self.pendingBlocks:
handles.add(self.pendingBlocks.getWantHandle(address))
for address in addresses:
self.trackedFutures.track(self.downloadInternal(address))
let totalHandles = handles.len
var completed = 0
proc isFinished(): bool =
completed == totalHandles
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
# Be it success or failure, we're completing this future.
let value =
try:
# FIXME: this is super expensive. We're doing several linear scans,
# not to mention all the copying and callback fumbling in `one`.
let
handle = await one(handles)
i = handles.find(handle)
handles.del(i)
success await handle
except CancelledError as err:
warn "Block request cancelled", addresses, err = err.msg
raise err
except CatchableError as err:
error "Error getting blocks from exchange engine", addresses, err = err.msg
failure err
inc(completed)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
proc requestBlock*(
self: BlockExcEngine, address: BlockAddress
@ -239,60 +512,64 @@ proc completeBlock*(self: BlockExcEngine, address: BlockAddress, blk: Block) =
proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
trace "Received block presence from peer", peer, len = blocks.len
let
peerCtx = self.peers.get(peer)
ourWantList = toSeq(self.pendingBlocks.wantList)
ourWantList = toHashSet(self.pendingBlocks.wantList.toSeq)
if peerCtx.isNil:
return
peerCtx.refreshReplied()
for blk in blocks:
if presence =? Presence.init(blk):
peerCtx.setPresence(presence)
let
peerHave = peerCtx.peerHave
dontWantCids = peerHave.filterIt(it notin ourWantList)
dontWantCids = peerHave - ourWantList
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)
peerCtx.cleanPresence(dontWantCids.toSeq)
let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it)
)
self.pendingBlocks.markRequested(it, peer)
).toSeq
for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
peerCtx.blockRequestScheduled(address)
if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
# FIXME: this will result in duplicate requests for blocks
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg
for address in ourWantCids:
self.pendingBlocks.clearRequest(address, peer.some)
proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to
for p in self.peers:
for c in cids: # for each cid
for blockDelivery in blocksDelivery: # for each cid
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWantsCids:
if blockDelivery.address in p.wantedBlocks:
let cid = blockDelivery.blk.cid
try:
if await (c in self.localStore):
if await (cid in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p)
break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
warn "Checking local store canceled", cid = cid, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
error "Error checking local store for cid", cid = cid, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"
proc cancelBlocks(
@ -301,28 +578,45 @@ proc cancelBlocks(
## Tells neighboring peers that we're no longer interested in a block.
##
let blocksDelivered = toHashSet(addrs)
var scheduledCancellations: Table[PeerId, HashSet[BlockAddress]]
if self.peers.len == 0:
return
trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
proc dispatchCancellations(
entry: tuple[peerId: PeerId, addresses: HashSet[BlockAddress]]
): Future[PeerId] {.async: (raises: [CancelledError]).} =
trace "Sending block request cancellations to peer",
peer = entry.peerId, addresses = entry.addresses.len
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
peer = entry.peerId, addresses = entry.addresses.toSeq
)
return peerCtx
return entry.peerId
try:
let (succeededFuts, failedFuts) = await allFinishedFailed[BlockExcPeerCtx](
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
)
for peerCtx in self.peers.peers.values:
# Do we have pending requests towards this peer for any of the blocks
# that were just delivered?
let intersection = peerCtx.blocksRequested.intersection(blocksDelivered)
if intersection.len > 0:
# If so, schedules a cancellation.
scheduledCancellations[peerCtx.id] = intersection
if scheduledCancellations.len == 0:
return
let (succeededFuts, failedFuts) = await allFinishedFailed[PeerId](
toSeq(scheduledCancellations.pairs).map(dispatchCancellations)
)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerId: PeerId):
let ctx = self.peers.get(peerId)
if not ctx.isNil:
ctx.cleanPresence(addrs)
for address in scheduledCancellations[peerId]:
ctx.blockRequestCancelled(address)
if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
@@ -392,17 +686,31 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
return success()
proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
self: BlockExcEngine,
peer: PeerId,
blocksDelivery: seq[BlockDelivery],
allowSpurious: bool = false,
) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery]
let peerCtx = self.peers.get(peer)
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for bd in blocksDelivery:
logScope:
peer = peer
address = bd.address
try:
# Unknown peers and unrequested blocks are dropped with a warning.
if not allowSpurious and (peerCtx == nil or not peerCtx.blockReceived(bd.address)):
warn "Dropping unrequested or duplicate block received from peer"
codex_block_exchange_spurious_blocks_received.inc()
continue
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
@@ -422,15 +730,25 @@ proc blocksDeliveryHandler*(
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CancelledError:
trace "Block delivery handling cancelled"
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue
validatedBlocksDelivery.add(bd)
if (Moment.now() - lastIdle) >= runtimeQuota:
try:
await idleAsync()
except CancelledError:
discard
except CatchableError:
discard
lastIdle = Moment.now()
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = self.peers.get(peer)
if peerCtx != nil:
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
@@ -454,16 +772,17 @@ proc wantListHandler*(
presence: seq[BlockPresence]
schedulePeer = false
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
try:
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants
if e.address notin peerCtx.wantedBlocks: # Adding new entry to peer wants
let
have =
try:
@@ -474,6 +793,8 @@ proc wantListHandler*(
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel:
# This is sort of expected if we sent the block to the peer, as we have removed
# it from the peer's wantlist ourselves.
trace "Received cancelation for untracked block, skipping",
address = e.address
continue
@@ -482,12 +803,14 @@
case e.wantType
of WantType.WantHave:
if have:
trace "We HAVE the block", address = e.address
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
trace "We DON'T HAVE the block", address = e.address
if e.sendDontHave:
presence.add(
BlockPresence(
@@ -497,28 +820,35 @@
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
peerCtx.wantedBlocks.incl(e.address)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
peerCtx.wantedBlocks.excl(e.address)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
address = e.address, len = peerCtx.wantedBlocks.len
else:
trace "Peer has requested a block more than once", address = e.address
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
if presence.len >= PresenceBatchSize or (Moment.now() - lastIdle) >= runtimeQuota:
if presence.len > 0:
trace "Sending presence batch to remote", items = presence.len
await self.network.request.sendPresence(peer, presence)
presence = @[]
try:
await idleAsync()
except CancelledError:
discard
lastIdle = Moment.now()
# Send any remaining presence messages
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
trace "Sending final presence to remote", items = presence.len
await self.network.request.sendPresence(peer, presence)
if schedulePeer:
@@ -550,7 +880,7 @@ proc paymentHandler*(
else:
context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*(
proc peerAddedHandler*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want
@@ -560,88 +890,85 @@ proc setupPeer*(
trace "Setting up peer", peer
if peer notin self.peers:
let peerCtx = BlockExcPeerCtx(id: peer, activityTimeout: DefaultPeerActivityTimeout)
trace "Setting up new peer", peer
self.peers.add(BlockExcPeerCtx(id: peer))
self.peers.add(peerCtx)
trace "Added peer", peers = self.peers.len
# broadcast our want list, the other peer will do the same
if self.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(self.pendingBlocks.wantList)
await self.network.request.sendWantList(peer, cids, full = true)
await self.refreshBlockKnowledge(peerCtx)
if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer
##
proc localLookup(
self: BlockExcEngine, address: BlockAddress
): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} =
if address.leaf:
(await self.localStore.getBlockAndProof(address.treeCid, address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(address: address, blk: blkAndProof[0], proof: blkAndProof[1].some)
)
else:
(await self.localStore.getBlock(address)).map(
(blk: Block) => BlockDelivery(address: address, blk: blk, proof: CodexProof.none)
)
trace "Dropping peer", peer
iterator splitBatches[T](sequence: seq[T], batchSize: int): seq[T] =
var batch: seq[T]
for element in sequence:
if batch.len == batchSize:
yield batch
batch = @[]
batch.add(element)
# drop the peer from the peers table
self.peers.remove(peer)
if batch.len > 0:
yield batch
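# Usage sketch (illustrative): batches preserve order and the trailing
# partial batch is flushed, e.g. with batchSize = 2:
#
#   for batch in splitBatches(@[1, 2, 3, 4, 5], 2):
#     echo batch   # @[1, 2], then @[3, 4], then @[5]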
proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
self: BlockExcEngine, peerCtx: BlockExcPeerCtx
) {.async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send the peer the blocks it wants,
# if they are present in our local store
# TODO: There should be all sorts of accounting of
# bytes sent/received here
# Blocks that have been sent have already been picked up by other tasks and
# should not be re-sent.
var
wantedBlocks = peerCtx.wantedBlocks.filterIt(not peerCtx.isBlockSent(it))
sent: HashSet[BlockAddress]
var wantsBlocks =
task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
trace "Running task for peer", peer = peerCtx.id
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
for peerWant in task.peerWants.mitems:
if peerWant.address in addresses:
peerWant.inFlight = inFlight
for wantedBlock in wantedBlocks:
peerCtx.markBlockAsSent(wantedBlock)
if wantsBlocks.len > 0:
# Mark wants as in-flight.
let wantAddresses = wantsBlocks.mapIt(it.address)
updateInFlight(wantAddresses, true)
wantsBlocks.sort(SortOrder.Descending)
try:
for batch in wantedBlocks.toSeq.splitBatches(self.maxBlocksPerMessage):
var blockDeliveries: seq[BlockDelivery]
for wantedBlock in batch:
# I/O is blocking so looking up blocks sequentially is fine.
without blockDelivery =? await self.localLookup(wantedBlock), err:
error "Error getting block from local store",
err = err.msg, address = wantedBlock
peerCtx.markBlockAsNotSent(wantedBlock)
continue
blockDeliveries.add(blockDelivery)
sent.incl(wantedBlock)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
if e.address.leaf:
(await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
)
)
else:
(await self.localStore.getBlock(e.address)).map(
(blk: Block) =>
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
if blockDeliveries.len == 0:
continue
let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"
# All the wants that failed local lookup must be set to not-in-flight again.
let
successAddresses = blocksDelivery.mapIt(it.address)
failedAddresses = wantAddresses.filterIt(it notin successAddresses)
updateInFlight(failedAddresses, false)
if blocksDelivery.len > 0:
trace "Sending blocks to peer",
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
task.peerWants.keepItIf(it.address notin successAddresses)
await self.network.request.sendBlocksDelivery(peerCtx.id, blockDeliveries)
codex_block_exchange_blocks_sent.inc(blockDeliveries.len.int64)
# Drops the batch from the peer's set of wanted blocks; i.e. assumes that once
# we send the blocks, the peer no longer wants them, so we don't need to
# re-send them. Note that the send might still fail down the line and we will
# have removed those anyway. At that point, we rely on the requester performing
# a retry for the request to succeed.
peerCtx.wantedBlocks.keepItIf(it notin sent)
finally:
# Better safe than sorry: if an exception does happen, we don't want to keep
# those as sent, as it'll effectively prevent the blocks from ever being sent again.
peerCtx.blocksSent.keepItIf(it notin wantedBlocks)
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
## process tasks
@@ -652,11 +979,47 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx)
except CancelledError:
trace "block exchange task runner cancelled"
except CatchableError as exc:
error "error running block exchange task", error = exc.msg
info "Exiting blockexc task runner"
proc selectRandom*(
peers: seq[BlockExcPeerCtx]
): BlockExcPeerCtx {.gcsafe, raises: [].} =
if peers.len == 1:
return peers[0]
proc evalPeerScore(peer: BlockExcPeerCtx): float =
let
loadPenalty = peer.blocksRequested.len.float * 2.0
successRate =
if peer.exchanged > 0:
peer.exchanged.float / (peer.exchanged + peer.blocksRequested.len).float
else:
0.5
failurePenalty = (1.0 - successRate) * 5.0
return loadPenalty + failurePenalty
let
scores = peers.mapIt(evalPeerScore(it))
maxScore = scores.max() + 1.0
weights = scores.mapIt(maxScore - it)
var totalWeight = 0.0
for w in weights:
totalWeight += w
var r = rand(totalWeight)
for i, weight in weights:
r -= weight
if r <= 0.0:
return peers[i]
return peers[^1]
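# Worked example (illustrative numbers): peer A has 2 blocks requested and 8
# exchanged; peer B has 10 requested and 0 exchanged.
#
#   A: loadPenalty = 2 * 2.0 = 4.0; successRate = 8 / 10 = 0.8;
#      failurePenalty = 0.2 * 5.0 = 1.0  =>  score = 5.0
#   B: loadPenalty = 10 * 2.0 = 20.0; successRate = 0.5 (no exchanges);
#      failurePenalty = 0.5 * 5.0 = 2.5  =>  score = 22.5
#
# With maxScore + 1 = 23.5, the weights are A = 18.5 and B = 1.0, so A is
# selected with probability ~95% while B still gets occasional traffic.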
proc new*(
T: type BlockExcEngine,
localStore: BlockStore,
@@ -666,7 +1029,9 @@
advertiser: Advertiser,
peerStore: PeerCtxStore,
pendingBlocks: PendingBlocksManager,
maxBlocksPerMessage = DefaultMaxBlocksPerMessage,
concurrentTasks = DefaultConcurrentTasks,
selectPeer: PeerSelector = selectRandom,
): BlockExcEngine =
## Create new block exchange engine instance
##
@@ -679,23 +1044,13 @@
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures(),
maxBlocksPerMessage: maxBlocksPerMessage,
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiser: advertiser,
selectPeer: selectPeer,
)
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId)
else:
self.dropPeer(peerId)
if not isNil(network.switch):
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
@@ -721,12 +1076,24 @@
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)
proc peerAddedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
await self.peerAddedHandler(peer)
proc peerDepartedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
self.evictPeer(peer)
network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler,
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler,
onPeerJoined: peerAddedHandler,
onPeerDeparted: peerDepartedHandler,
)
return self

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -34,7 +34,7 @@ declareGauge(
const
DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis
DefaultRetryInterval* = 2.seconds
type
RetriesExhaustedError* = object of CatchableError
@@ -42,7 +42,7 @@ type
BlockReq* = object
handle*: BlockHandle
inFlight*: bool
requested*: ?PeerId
blockRetries*: int
startTime*: int64
@@ -50,12 +50,13 @@
blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
self: PendingBlocksManager, address: BlockAddress, inFlight = false
self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block
##
@@ -65,11 +66,13 @@ proc getWantHandle*(
do:
let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
requested: requested,
blockRetries: self.blockRetries,
startTime: getMonoTime().ticks,
)
self.blocks[address] = blk
self.lastInclusion = Moment.now()
let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} =
@@ -86,9 +89,9 @@
return handle
proc getWantHandle*(
self: PendingBlocksManager, cid: Cid, inFlight = false
self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight)
self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
self: PendingBlocksManager, address: BlockAddress, blk: Block
@@ -121,9 +124,6 @@
blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else:
trace "Block handle already finished", address = bd.address
@@ -141,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
## Set inflight status for a block
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block has been requested from a peer
##
result = false
self.blocks.withValue(address, pending):
result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
## Returns the peer this block was requested from
##
result = PeerId.none
self.blocks.withValue(address, pending):
result = pending[].requested
proc markRequested*(
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
## Marks this block as having been requested from a peer
##
self.blocks.withValue(address, pending):
pending[].inFlight = inFlight
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##
if self.isRequested(address):
return false
self.blocks.withValue(address, pending):
result = pending[].inFlight
pending[].requested = peer.some
return true
proc clearRequest*(
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
self.blocks.withValue(address, pending):
if peer.isSome:
assert peer == pending[].requested
pending[].requested = PeerId.none
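# Request lifecycle sketch (illustrative; assumes `pending`, `address` and
# `peer` are in scope):
#
#   let handle = pending.getWantHandle(address)   # start tracking the block
#   if pending.markRequested(address, peer):      # attribute it to a peer
#     assert pending.isRequested(address)
#     assert pending.getRequestPeer(address) == peer.some
#   pending.clearRequest(address, peer.some)      # e.g. after a failed send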
func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -35,15 +35,14 @@
DefaultMaxInflight* = 100
type
WantListHandler* =
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
@@ -51,6 +50,9 @@ type
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc(
id: PeerId,
@@ -240,96 +242,116 @@
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
b.trackedFutures.track(b.handleWantList(peer, msg.wantList))
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0:
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload))
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0:
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences))
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account):
b.trackedFutures.track(b.handleAccount(peer, account))
self.trackedFutures.track(self.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment):
b.trackedFutures.track(b.handlePayment(peer, payment))
self.trackedFutures.track(self.handlePayment(peer, payment))
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
##
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)
if peer in self.peers:
return self.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec)
return await self.switch.dial(peer, Codec)
except CancelledError as error:
raise error
except CatchableError as exc:
trace "Unable to connect to blockexc peer", exc = exc.msg
if not isNil(b.getConn):
getConn = b.getConn
if not isNil(self.getConn):
getConn = self.getConn
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg)
await self.rpcHandler(p, msg)
# create a new blockexc peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
debug "Created new blockexc peer", peer
b.peers[peer] = blockExcPeer
self.peers[peer] = blockExcPeer
return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
## Perform initial setup, such as want
## list exchange
##
discard b.getOrCreatePeer(peer)
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer
##
if b.isSelf(peer.peerId):
if self.isSelf(peer.peerId):
trace "Skipping dialing self", peer = peer.peerId
return
if peer.peerId in b.peers:
if peer.peerId in self.peers:
trace "Already connected to peer", peer = peer.peerId
return
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
proc dropPeer*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
trace "Dropping peer", peer
try:
if not self.switch.isNil:
await self.switch.disconnect(peer)
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
discard self.getOrCreatePeer(peer)
if not self.handlers.onPeerJoined.isNil:
await self.handlers.onPeerJoined(peer)
proc handlePeerDeparted*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Cleanup disconnected peer
##
trace "Dropping peer", peer
b.peers.del(peer)
trace "Cleaning up departed peer", peer
self.peers.del(peer)
if not self.handlers.onPeerDeparted.isNil:
await self.handlers.onPeerDeparted(peer)
method init*(self: BlockExcNetwork) =
method init*(self: BlockExcNetwork) {.raises: [].} =
## Perform protocol initialization
##
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
): Future[void] {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId)
await self.handlePeerJoined(peerId)
elif event.kind == PeerEventKind.Left:
await self.handlePeerDeparted(peerId)
else:
self.dropPeer(peerId)
warn "Unknown peer event", event
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -24,10 +24,9 @@ logScope:
const DefaultYieldInterval = 50.millis
type
ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
NetworkPeer* = ref object of RootObj
id*: PeerId
@@ -65,7 +64,9 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = self.id, connId = conn.oid
warn "Detaching read loop", peer = self.id, connId = conn.oid
if self.sendConn == conn:
self.sendConn = nil
await conn.close()
proc connect*(
@@ -89,7 +90,12 @@ proc send*(
return
trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg))
try:
await conn.writeLp(protobufEncode(msg))
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
func new*(
T: type NetworkPeer,

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -25,28 +25,77 @@ import ../../logutils
export payments, nitro
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # max backoff factor (36 * MinRefreshInterval = 36 seconds)
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
if staleness and self.refreshInProgress:
trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
staleness
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
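# Backoff arithmetic (with MinRefreshInterval = 1s): refreshes that bring no
# new haves are spaced 1s, 2s, 4s, ..., 32s, then clamped to 36s, since
# refreshReplied doubles refreshBackoff up to MaxRefreshBackoff; any new have
# or want resets the factor to 1, and isKnowledgeStale fires once
# lastRefresh + refreshBackoff * MinRefreshInterval is in the past.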
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly and inefficient, but since these will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to skip it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
price += precense[].price
price
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block to the set of blocks that have been requested from this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested from this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)
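# Intended wiring (a sketch; the engine's actual wiring may differ):
#
#   let timer = peerCtx.activityTimer()
#   # blockReceived() refreshes peerCtx.lastExchange on every delivery...
#   await timer                   # completes after activityTimeout of silence
#   self.evictPeer(peerCtx.id)    # hypothetical follow-up by the engine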

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -62,21 +62,23 @@ func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is much slower than the address-based lookup and can lead to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
# FIXME: this is much slower than the address-based lookup and can lead to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if peer.peerHave.anyIt(it == address):
if address in peer:
res.with.add(peer)
else:
res.without.add(peer)

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,7 +9,6 @@
import std/hashes
import std/sequtils
import pkg/stew/endians2
import message
@@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash =
hash(e.address)

View File

@@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes
# Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
@@ -25,11 +25,15 @@ type
WantListEntry* = object
address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process those in the same order as we send them back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). Defaults to 1.
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries

View File

@@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes.
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,16 +9,14 @@
import std/tables
import std/sugar
import std/hashes
export tables
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/stew/[byteutils, endians2]
import pkg/questionable
import pkg/questionable/results
@@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
else:
"cid: " & $a.cid
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
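# Illustrative note: a leaf address digests the tree CID bytes concatenated
# with the big-endian index, so sibling leaves such as (treeCid, index: 0)
# and (treeCid, index: 1) hash independently, while a non-leaf address
# hashes its block CID alone.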
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -9,10 +9,7 @@
# TODO: This is super inefficient and needs a rewrite, but it'll do for now
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/questionable
import pkg/questionable/results
@@ -31,7 +28,7 @@ type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError])
async: (raises: [ChunkerError, CancelledError])
.}
# Reader that splits input data into fixed-size chunks
@@ -77,7 +74,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:
@@ -105,7 +102,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:

View File

@@ -1,6 +1,7 @@
{.push raises: [].}
import pkg/chronos
import pkg/stew/endians2
import pkg/upraises
import pkg/stint
type
@@ -8,10 +9,12 @@ type
SecondsSince1970* = int64
Timeout* = object of CatchableError
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} =
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
raiseAssert "not implemented"
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
method waitUntil*(
clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
raiseAssert "not implemented"
method start*(clock: Clock) {.base, async.} =

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -12,6 +12,7 @@ import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net
import pkg/chronos
import pkg/taskpools
@@ -21,7 +22,6 @@ import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
@@ -57,10 +57,20 @@ type
repoStore: RepoStore
maintenance: BlockMaintainer
taskpool: Taskpool
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
func config*(self: CodexServer): CodexConf =
return self.config
func node*(self: CodexServer): CodexNodeRef =
return self.codexNode
func repoStore*(self: CodexServer): RepoStore =
return self.repoStore
proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1
trace "Checking sync state of Ethereum provider..."
@@ -128,7 +138,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
when storage_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
@@ -159,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config
if s.isStarted:
warn "Storage server already started, skipping"
return
trace "Starting Storage node", config = $s.config
await s.repoStore.start()
s.maintenance.start()
await s.codexNode.switch.start()
@@ -175,27 +189,55 @@ proc start*(s: CodexServer) {.async.} =
await s.bootstrapInteractions()
await s.codexNode.start()
s.restServer.start()
if s.restServer != nil:
s.restServer.start()
s.isStarted = true
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"
if not s.isStarted:
warn "Storage is not started"
return
let res = await noCancel allFinishedFailed[void](
notice "Stopping Storage node"
var futures =
@[
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
)
if s.restServer != nil:
futures.add(s.restServer.stop())
let res = await noCancel allFinishedFailed[void](futures)
if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"
error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop Storage node"
proc close*(s: CodexServer) {.async.} =
var futures = @[s.codexNode.close(), s.repoStore.close()]
let res = await noCancel allFinishedFailed[void](futures)
if not s.taskpool.isNil:
s.taskpool.shutdown()
try:
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
proc shutdown*(server: CodexServer) {.async.} =
await server.stop()
await server.close()
proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
@@ -211,21 +253,21 @@ proc new*(
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr})
.withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
.build()
var
cache: CacheStore = nil
taskpool: Taskpool
taskPool: Taskpool
try:
if config.numThreads == ThreadCount(0):
taskpool = Taskpool.new(numThreads = min(countProcessors(), 16))
taskPool = Taskpool.new(numThreads = min(countProcessors(), 16))
else:
taskpool = Taskpool.new(numThreads = int(config.numThreads))
info "Threadpool started", numThreads = taskpool.numThreads
taskPool = Taskpool.new(numThreads = int(config.numThreads))
info "Threadpool started", numThreads = taskPool.numThreads
except CatchableError as exc:
raiseAssert("Failure in taskpool initialization:" & exc.msg)
raiseAssert("Failure in taskPool initialization:" & exc.msg)
if config.cacheSize > 0'nb:
cache = CacheStore.new(cacheSize = config.cacheSize)
@@ -295,7 +337,7 @@ proc new*(
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
@@ -307,7 +349,7 @@ proc new*(
if config.prover:
let backend =
config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
some Prover.new(store, backend, config.numProofSamples, taskPool)
else:
none Prover
@@ -317,13 +359,16 @@ proc new*(
engine = engine,
discovery = discovery,
prover = prover,
taskPool = taskpool,
taskPool = taskPool,
)
var restServer: RestServerRef = nil
if config.apiBindAddress.isSome:
restServer = RestServerRef
.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort),
initTAddress(config.apiBindAddress.get(), config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high,
)
@@ -337,5 +382,5 @@ proc new*(
restServer: restServer,
repoStore: repoStore,
maintenance: maintenance,
taskpool: taskpool,
taskPool: taskPool,
)

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
{.pop.}
import std/options
import std/parseutils
import std/strutils
import std/typetraits
import std/net
import pkg/chronos
import pkg/chronicles/helpers
@@ -27,13 +29,12 @@ import pkg/confutils/std/net
import pkg/toml_serialization
import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64
import ./codextypes
import ./discovery
@@ -46,13 +47,14 @@ import ./utils/natutils
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
type ThreadCount* = distinct Natural
@@ -61,18 +63,18 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string =
let dataDir =
when defined(windows):
"AppData" / "Roaming" / "Codex"
"AppData" / "Roaming" / "Storage"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
"Library" / "Application Support" / "Storage"
else:
".cache" / "codex"
".cache" / "storage"
getHomeDir() / dataDir
const
codex_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false
storage_enable_api_debug_peers* {.booldefine.} = false
storage_enable_proof_failures* {.booldefine.} = false
storage_enable_log_counter* {.booldefine.} = false
DefaultThreadCount* = ThreadCount(0)
@@ -135,7 +137,7 @@ type
.}: Port
dataDir* {.
desc: "The directory where codex will store configuration and data",
desc: "The directory where Storage will store configuration and data",
defaultValue: defaultDataDir(),
defaultValueDesc: "",
abbr: "d",
@@ -196,14 +198,16 @@
.}: ThreadCount
agentString* {.
defaultValue: "Codex",
defaultValue: "Logos Storage",
desc: "Node agent string which is used as identifier in network",
name: "agent-string"
.}: string
apiBindAddress* {.
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
.}: string
desc: "The REST API bind address",
defaultValue: "127.0.0.1".some,
name: "api-bindaddr"
.}: Option[string]
apiPort* {.
desc: "The REST Api port",
@@ -261,6 +265,13 @@
name: "block-mn"
.}: int
blockRetries* {.
desc: "Number of times to retry fetching a block before giving up",
defaultValue: DefaultBlockRetries,
defaultValueDesc: $DefaultBlockRetries,
name: "block-retries"
.}: int
cacheSize* {.
desc:
"The size of the block cache, 0 disables the cache - " &
@@ -380,7 +391,7 @@ type
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Codex will store proof circuit data",
desc: "Directory where Storage will store proof circuit data",
defaultValue: defaultDataDir() / "circuits",
defaultValueDesc: "data/circuits",
abbr: "cd",
@@ -474,7 +485,7 @@ func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string =
let tag = strip(staticExec("git tag"))
let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace:
return "untagged build"
return tag
@@ -485,7 +496,8 @@ proc getCodexRevision(): string =
return res
proc getCodexContractsRevision(): string =
let res = strip(staticExec("git rev-parse --short HEAD:vendor/codex-contracts-eth"))
let res =
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
return res
proc getNimBanner(): string =
@@ -498,67 +510,85 @@ const
nimBanner* = getNimBanner()
codexFullVersion* =
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
"Codex contracts revision: " & codexContractsRevision & "\p" & nimBanner
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
proc parseCmdArg*(
T: typedesc[MultiAddress], input: string
): MultiAddress {.upraises: [ValueError].} =
): MultiAddress {.raises: [ValueError].} =
var ma: MultiAddress
try:
let res = MultiAddress.init(input)
if res.isOk:
ma = res.get()
else:
warn "Invalid MultiAddress", input = input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
except LPError as exc:
warn "Invalid MultiAddress uri", uri = input, error = exc.msg
fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
quit QuitFailure
ma
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} =
let count = parseInt(input)
if count != 0 and count < 2:
warn "Invalid number of threads", input = input
quit QuitFailure
ThreadCount(count)
proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
try:
let count = parseInt(p)
if count != 0 and count < 2:
return err("Invalid number of threads: " & p)
return ok(ThreadCount(count))
except ValueError as e:
return err("Invalid number of threads: " & p & ", error=" & e.msg)
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
proc parseCmdArg*(T: type ThreadCount, input: string): T =
let val = ThreadCount.parse(input)
if val.isErr:
fatal "Cannot parse the thread count.", input = input, error = val.error()
quit QuitFailure
return val.get()
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
var res: SignedPeerRecord
try:
if not res.fromURI(uri):
warn "Invalid SignedPeerRecord uri", uri = uri
quit QuitFailure
except LPError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
res
if not res.fromURI(p):
return err("The uri is not a valid SignedPeerRecord: " & p)
return ok(res)
except LPError, Base64Error:
let e = getCurrentException()
return err(e.msg)
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
let res = SignedPeerRecord.parse(uri)
if res.isErr:
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
quit QuitFailure
return res.get()
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
case p.toLowerAscii
of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
else:
if p.startsWith("extip:"):
try:
let ip = parseIpAddress(p[6 ..^ 1])
NatConfig(hasExtIp: true, extIp: ip)
return ok(NatConfig(hasExtIp: true, extIp: ip))
except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1]
raise newException(ValueError, error)
return err(error)
else:
let error = "Not a valid NAT option: " & p
raise newException(ValueError, error)
return err("Not a valid NAT option: " & p)
proc parseCmdArg*(T: type NatConfig, p: string): T =
let res = NatConfig.parse(p)
if res.isErr:
fatal "Cannot parse the NAT config.", error = res.error(), input = p
quit QuitFailure
return res.get()
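# Usage sketch (addresses are illustrative):
#
#   doAssert NatConfig.parse("upnp").isOk
#   doAssert NatConfig.parse("extip:203.0.113.7").get.hasExtIp
#   doAssert NatConfig.parse("extip:not-an-ip").isErr
#   doAssert NatConfig.parse("bogus").isErr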
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[]
@@ -566,25 +596,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
proc parseCmdArg*(T: type EthAddress, address: string): T =
EthAddress.init($address).get()
proc parseCmdArg*(T: type NBytes, val: string): T =
func parse*(T: type NBytes, p: string): Result[NBytes, string] =
var num = 0'i64
let count = parseSize(val, num, alwaysBin = true)
let count = parseSize(p, num, alwaysBin = true)
if count == 0:
warn "Invalid number of bytes", nbytes = val
return err("Invalid number of bytes: " & p)
return ok(NBytes(num))
proc parseCmdArg*(T: type NBytes, val: string): T =
let res = NBytes.parse(val)
if res.isErr:
fatal "Cannot parse NBytes.", error = res.error(), input = val
quit QuitFailure
NBytes(num)
return res.get()
proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration
let count = parseDuration(val, dur)
if count == 0:
warn "Cannot parse duration", dur = dur
fatal "Cannot parse duration", dur = dur
quit QuitFailure
dur
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@@ -595,7 +631,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
try:
val = SignedPeerRecord.parseCmdArg(uri)
except LPError as err:
warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
quit QuitFailure
proc readValue*(r: var TomlReader, val: var MultiAddress) =
@@ -607,12 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk:
val = res.get()
else:
warn "Invalid MultiAddress", input = input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
proc readValue*(
r: var TomlReader, val: var NBytes
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var value = 0'i64
var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true)
@@ -623,7 +659,7 @@ proc readValue*(
proc readValue*(
r: var TomlReader, val: var ThreadCount
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
try:
val = parseCmdArg(ThreadCount, str)
@@ -632,7 +668,7 @@ proc readValue*(
proc readValue*(
r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
var dur: Duration
let count = parseDuration(str, dur)
@@ -699,7 +735,7 @@ proc stripAnsi*(v: string): string =
res
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
# Updates log levels (without clearing old ones)
let directives = logLevel.split(";")
try:
@@ -768,7 +804,7 @@ proc setupLogging*(conf: CodexConf) =
of LogKind.None:
noOutput
when codex_enable_log_counter:
when storage_enable_log_counter:
var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter)
@@ -779,15 +815,6 @@ proc setupLogging*(conf: CodexConf) =
else:
defaultChroniclesStream.outputs[0].writer = writer
try:
updateLogLevel(conf.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
proc setupMetrics*(config: CodexConf) =
if config.metricsEnabled:
let metricsAddress = config.metricsAddress

View File

@@ -0,0 +1,8 @@
const ContentIdsExts = [
multiCodec("codex-root"),
multiCodec("codex-manifest"),
multiCodec("codex-block"),
multiCodec("codex-slot-root"),
multiCodec("codex-proving-root"),
multiCodec("codex-slot-cell"),
]

View File

@@ -1,13 +1,13 @@
Codex Contracts in Nim
Logos Storage Contracts in Nim
==============================
Nim API for the [Codex smart contracts][1].
Nim API for the [Logos Storage smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
storage contract, see [Logos Storage Contracts][1].
Smart contract
--------------
@@ -144,5 +144,5 @@ await storage
.markProofAsMissing(id, period)
```
[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md

View File

@@ -1,3 +1,5 @@
{.push raises: [].}
import std/times
import pkg/ethers
import pkg/questionable
@@ -72,7 +74,9 @@ method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
method waitUntil*(
clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

View File

@@ -1,7 +1,6 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
@@ -436,7 +435,7 @@ method canReserveSlot*(
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
@@ -450,7 +449,7 @@ method subscribeRequests*(
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
@@ -477,7 +476,7 @@ method subscribeSlotFilled*(
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
@ -491,7 +490,7 @@ method subscribeSlotFreed*(
method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
@ -506,7 +505,7 @@ method subscribeSlotReservationsFull*(
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@ -520,7 +519,7 @@ method subscribeFulfillment(
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@ -535,7 +534,7 @@ method subscribeFulfillment(
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@ -549,7 +548,7 @@ method subscribeRequestCancelled*(
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@ -564,7 +563,7 @@ method subscribeRequestCancelled*(
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -578,7 +577,7 @@ method subscribeRequestFailed*(
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -593,7 +592,7 @@ method subscribeRequestFailed*(
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return

View File

@ -2,7 +2,7 @@ import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,13 +10,13 @@
{.push raises: [].}
import std/algorithm
import std/net
import std/sequtils
import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable
import pkg/questionable/results
import pkg/stew/shims/net
import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256
@ -43,6 +43,7 @@ type Discovery* = ref object of RootObj
# record to advertise node connection information; this carries any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
isStarted: bool
proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id
@ -157,7 +158,7 @@ method provide*(
method removeProvider*(
d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
): Future[void] {.base, async: (raises: [CancelledError]).} =
## Remove provider from providers table
##
@ -203,10 +204,15 @@ proc start*(d: Discovery) {.async: (raises: []).} =
try:
d.protocol.open()
await d.protocol.start()
d.isStarted = true
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async: (raises: []).} =
if not d.isStarted:
warn "Discovery not started, skipping stop"
return
try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import ../stores

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/[sugar, atomics, sequtils]
@ -428,7 +425,7 @@ proc encodeData(
return failure("Unable to store block!")
idx.inc(params.steps)
without tree =? CodexTree.init(cids[]), err:
without tree =? (await CodexTree.init(self.taskPool, cids[])), err:
return failure(err)
without treeCid =? tree.rootCid, err:
@ -649,7 +646,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
without tree =?
(await CodexTree.init(self.taskPool, cids[0 ..< encoded.originalBlocksCount])), err:
return failure(err)
without treeCid =? tree.rootCid, err:
@ -680,7 +678,8 @@ proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
without (cids, _) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
without tree =?
(await CodexTree.init(self.taskPool, cids[0 ..< encoded.originalBlocksCount])), err:
return failure(err)
without treeCid =? tree.rootCid, err:

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
##
## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,11 +9,9 @@
# This module implements serialization and deserialization of Manifest
import pkg/upraises
import times
push:
{.upraises: [].}
{.push raises: [].}
import std/tables
import std/sequtils

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# This module defines all operations on Manifest
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]

View File

@ -1,5 +1,4 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
@ -23,15 +22,15 @@ type
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
@ -275,7 +274,7 @@ method subscribeProofSubmission*(
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
method unsubscribe*(subscription: Subscription) {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p
import pkg/questionable

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,16 +10,18 @@
{.push raises: [].}
import std/bitops
import std/sequtils
import std/[atomics, sequtils]
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/constantine/hashes
import pkg/taskpools
import pkg/chronos/threadsync
import ../../utils
import ../../rng
import ../../errors
import ../../blocktype
import ../../codextypes
from ../../utils/digest import digestBytes
@ -47,28 +49,6 @@ type
CodexProof* = ref object of ByteProof
mcodec*: MultiCodec
# CodeHashes is not exported from libp2p
# So we need to recreate it instead of
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Number of leaves
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var proof = CodexProof(mcodec: self.mcodec)
@ -128,38 +108,47 @@ proc `$`*(self: CodexProof): string =
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
## Compress two hashes
##
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/codex-storage/nim-codex/issues/1162
let input = @x & @y & @[key.byte]
var digest = hashes.sha256.hash(input)
let digest = ?MultiHash.digest(codec, input).mapFailure
success digest.digestBytes
success @digest
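As an aside (not part of the diff): with the sha2-256 override in multihash_exts, the keyed compression above boils down to a digest of the two concatenated children plus the key byte. A minimal sketch assuming nimcrypto and a hypothetical key ordinal:
import pkg/nimcrypto
let
  x = newSeq[byte](32) # left child hash
  y = newSeq[byte](32) # right child hash
  keyByte = byte 1 # hypothetical ordinal for a bottom-layer key
  parent = sha256.digest(x & y & @[keyByte]).data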
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
func initTree(mcodec: MultiCodec, leaves: openArray[ByteHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
compress(x, y, key, mcodec)
digestSize = ?mcodec.digestSize.mapFailure
Zero: ByteHash = newSeq[byte](digestSize)
if mhash.size != leaves[0].len:
if digestSize != leaves[0].len:
return failure "Invalid hash length"
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
var self = CodexTree(mcodec: mcodec)
?self.prepare(compressor, Zero, leaves)
success self
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
let tree = ?initTree(mcodec, leaves)
?tree.compute()
success tree
proc init*(
_: type CodexTree,
tp: Taskpool,
mcodec: MultiCodec = Sha256HashCodec,
leaves: seq[ByteHash],
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
let tree = ?initTree(mcodec, leaves)
?await tree.compute(tp)
success tree
func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
@ -170,6 +159,18 @@ func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
CodexTree.init(mcodec, leaves)
proc init*(
_: type CodexTree, tp: Taskpool, leaves: seq[MultiHash]
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = leaves[0].mcodec
leaves = leaves.mapIt(it.digestBytes)
await CodexTree.init(tp, mcodec, leaves)
func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
@ -180,6 +181,18 @@ func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
CodexTree.init(mcodec, leaves)
proc init*(
_: type CodexTree, tp: Taskpool, leaves: seq[Cid]
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
if leaves.len == 0:
return failure("Empty leaves")
let
mcodec = (?leaves[0].mhash.mapFailure).mcodec
leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes)
await CodexTree.init(tp, mcodec, leaves)
proc fromNodes*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
@ -190,23 +203,16 @@ proc fromNodes*(
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
if mhash.size != nodes[0].len:
if digestSize != nodes[0].len:
return failure "Invalid hash length"
var
self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
var self = CodexTree(mcodec: mcodec)
?self.fromNodes(compressor, Zero, nodes, nleaves)
let
index = Rng.instance.rand(nleaves - 1)
@ -228,10 +234,10 @@ func init*(
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
success CodexProof(
compress: compressor,

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,19 +9,58 @@
{.push raises: [].}
import std/bitops
import std/[bitops, atomics, sequtils]
import stew/assign2
import pkg/questionable/results
import pkg/taskpools
import pkg/chronos
import pkg/chronos/threadsync
import ../errors
import ../utils/sharedbuf
export sharedbuf
template nodeData(
data: openArray[byte], offsets: openArray[int], nodeSize, i, j: int
): openArray[byte] =
## Bytes of the j'th entry of the i'th level in the tree, starting with the
## leaves (at level 0).
let start = (offsets[i] + j) * nodeSize
data.toOpenArray(start, start + nodeSize - 1)
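To make the arithmetic concrete, a quick standalone check (not part of the diff), using 32-byte nodes and the 4-leaf offsets described below:
let
  offsets = [0, 4, 6] # layer start indices for a 4-leaf tree
  nodeSize = 32
  start = (offsets[1] + 1) * nodeSize # second node of the middle layer
assert start == 160 and start + nodeSize - 1 == 191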
type
# TODO hash functions don't fail - removing the ?! from this function would
# significantly simplify the flow below
CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
MerkleTree*[H, K] = ref object of RootObj
layers*: seq[seq[H]]
compress*: CompressFn[H, K]
zero*: H
CompressData[H, K] = object
fn: CompressFn[H, K]
nodeSize: int
zero: H
MerkleTreeObj*[H, K] = object of RootObj
store*: seq[byte]
## Flattened merkle tree where hashes are assumed to be trivial bytes and
## uniform in size.
##
## Each layer of the tree is stored serially starting with the leaves and
## ending with the root.
##
## Because the tree might not be balanced, `layerOffsets` contains the
## index of the starting point of each level, for easy lookup.
layerOffsets*: seq[int]
## Starting point of each level in the tree, starting from the leaves -
## multiplied by the entry size, this is the offset in the payload where
## the entries of that level start
##
## For example, a tree with 4 leaves will have [0, 4, 6] stored here.
##
## See the nodesPerLevel function, from which this sequence is derived
compress*: CompressData[H, K]
MerkleTree*[H, K] = ref MerkleTreeObj[H, K]
MerkleProof*[H, K] = ref object of RootObj
index*: int # linear index of the leaf, starting from 0
@ -30,33 +69,99 @@ type
compress*: CompressFn[H, K] # compress function
zero*: H # zero value
func levels*[H, K](self: MerkleTree[H, K]): int =
return self.layerOffsets.len
func depth*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len - 1
return self.levels() - 1
func nodesInLayer(offsets: openArray[int], layer: int): int =
if layer == offsets.high:
1
else:
offsets[layer + 1] - offsets[layer]
func nodesInLayer(self: MerkleTree | MerkleTreeObj, layer: int): int =
self.layerOffsets.nodesInLayer(layer)
func leavesCount*[H, K](self: MerkleTree[H, K]): int =
return self.layers[0].len
return self.nodesInLayer(0)
func levels*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len
func nodesPerLevel(nleaves: int): seq[int] =
## Given a number of leaves, return a seq with the number of nodes at each
## layer of the tree (from the bottom/leaves to the root)
##
## E.g. for a tree of 4 leaves, return `[4, 2, 1]`
if nleaves <= 0:
return @[]
elif nleaves == 1:
return @[1, 1] # leaf and root
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] =
return self.layers[0]
var nodes: seq[int] = @[]
var m = nleaves
while true:
nodes.add(m)
if m == 1:
break
# Next layer size is ceil(m/2)
m = (m + 1) shr 1
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] =
for layer in self.layers:
yield layer
nodes
func layerOffsets(nleaves: int): seq[int] =
## Given a number of leaves, return a seq of the starting offsets of each
## layer in the node store that results from flattening the binary tree
##
## E.g. for a tree of 4 leaves, return `[0, 4, 6]`
let nodes = nodesPerLevel(nleaves)
var tot = 0
let offsets = nodes.mapIt:
let cur = tot
tot += it
cur
offsets
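Together the two helpers describe the flattened layout; a minimal standalone sketch (mirroring the private procs above, not importing them) that reproduces the documented examples:
proc nodesPerLevelSketch(nleaves: int): seq[int] =
  if nleaves <= 0: return @[]
  if nleaves == 1: return @[1, 1] # leaf and root
  var m = nleaves
  while true:
    result.add(m)
    if m == 1: break
    m = (m + 1) shr 1 # next layer holds ceil(m/2) nodes

proc layerOffsetsSketch(nleaves: int): seq[int] =
  var tot = 0
  for n in nodesPerLevelSketch(nleaves):
    result.add(tot)
    tot += n

assert nodesPerLevelSketch(4) == @[4, 2, 1]
assert layerOffsetsSketch(4) == @[0, 4, 6]
assert layerOffsetsSketch(5) == @[0, 5, 8, 10] # unbalanced trees work too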
template nodeData(self: MerkleTreeObj, i, j: int): openArray[byte] =
## Bytes of the j'th node of the i'th level in the tree, starting with the
## leaves (at level 0).
self.store.nodeData(self.layerOffsets, self.compress.nodeSize, i, j)
func layer*[H, K](
self: MerkleTree[H, K], layer: int
): seq[H] {.deprecated: "Expensive".} =
var nodes = newSeq[H](self.nodesInLayer(layer))
for i, h in nodes.mpairs:
assign(h, self[].nodeData(layer, i))
return nodes
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} =
self.layer(0)
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} =
for i in 0 ..< self.layerOffsets.len:
yield self.layer(i)
proc layers*[H, K](self: MerkleTree[H, K]): seq[seq[H]] {.deprecated: "Expensive".} =
for l in self.layers():
result.add l
iterator nodes*[H, K](self: MerkleTree[H, K]): H =
for layer in self.layers:
for node in layer:
## Iterate over the nodes of each layer starting with the leaves
var node: H
for i in 0 ..< self.layerOffsets.len:
let nodesInLayer = self.nodesInLayer(i)
for j in 0 ..< nodesInLayer:
assign(node, self[].nodeData(i, j))
yield node
func root*[H, K](self: MerkleTree[H, K]): ?!H =
let last = self.layers[^1]
if last.len != 1:
mixin assign
if self.layerOffsets.len == 0:
return failure "invalid tree"
return success last[0]
var h: H
assign(h, self[].nodeData(self.layerOffsets.high(), 0))
return success h
func getProof*[H, K](
self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
@ -72,18 +177,19 @@ func getProof*[H, K](
var m = nleaves
for i in 0 ..< depth:
let j = k xor 1
path[i] =
if (j < m):
self.layers[i][j]
else:
self.zero
if (j < m):
assign(path[i], self[].nodeData(i, j))
else:
path[i] = self.compress.zero
k = k shr 1
m = (m + 1) shr 1
proof.index = index
proof.path = path
proof.nleaves = nleaves
proof.compress = self.compress
proof.compress = self.compress.fn
success()
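The proof path is simply the sibling node at each level; a standalone sketch of the index walk above, for leaf 5 of an 8-leaf tree:
var
  k = 5 # leaf index
  siblings: seq[int]
for _ in 0 ..< 3: # depth of an 8-leaf tree
  siblings.add(k xor 1) # the sibling flips the lowest bit
  k = k shr 1 # climb to the parent's index
assert siblings == @[4, 3, 0] # per-layer indices, leaves first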
@ -122,32 +228,169 @@ func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
success bool(root == ?proof.reconstructRoot(leaf))
func merkleTreeWorker*[H, K](
self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
): ?!seq[seq[H]] =
let a = low(xs)
let b = high(xs)
let m = b - a + 1
func fromNodes*[H, K](
self: MerkleTree[H, K],
compressor: CompressFn,
zero: H,
nodes: openArray[H],
nleaves: int,
): ?!void =
mixin assign
if nodes.len < 2: # At least leaf and root
return failure "Not enough nodes"
if nleaves == 0:
return failure "No leaves"
self.compress = CompressData[H, K](fn: compressor, nodeSize: nodes[0].len, zero: zero)
self.layerOffsets = layerOffsets(nleaves)
if self.layerOffsets[^1] + 1 != nodes.len:
return failure "bad node count"
self.store = newSeqUninit[byte](nodes.len * self.compress.nodeSize)
for i in 0 ..< nodes.len:
assign(
self[].store.toOpenArray(
i * self.compress.nodeSize, (i + 1) * self.compress.nodeSize - 1
),
nodes[i],
)
success()
func merkleTreeWorker[H, K](
store: var openArray[byte],
offsets: openArray[int],
compress: CompressData[H, K],
layer: int,
isBottomLayer: static bool,
): ?!void =
## Worker used to compute the merkle tree from the leaves that are assumed to
## already be stored at the beginning of the `store`, as done by `prepare`.
# Throughout, we use `assign` to convert from H to bytes and back, assuming
# this assignment can be done somewhat efficiently (ie memcpy) - because
# the code must work with multihash where len(H) can differ, we cannot
# simply use a fixed-size array here.
mixin assign
template nodeData(i, j: int): openArray[byte] =
# Pick out the bytes of node j in layer i
store.nodeData(offsets, compress.nodeSize, i, j)
let m = offsets.nodesInLayer(layer)
when not isBottomLayer:
if m == 1:
return success @[@xs]
return success()
let halfn: int = m div 2
let n: int = 2 * halfn
let isOdd: bool = (n != m)
var ys: seq[H]
if not isOdd:
ys = newSeq[H](halfn)
else:
ys = newSeq[H](halfn + 1)
# Because the compression function operates on H and not bytes, we need to
# extract H from the raw data - a little abstraction tax that ensures
# properties like alignment of H are respected.
var a, b, tmp: H
for i in 0 ..< halfn:
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key)
assign(a, nodeData(layer, i * 2))
assign(b, nodeData(layer, i * 2 + 1))
tmp = ?compress.fn(a, b, key = key)
assign(nodeData(layer + 1, i), tmp)
if isOdd:
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
ys[halfn] = ?self.compress(xs[n], self.zero, key = key)
success @[@xs] & ?self.merkleTreeWorker(ys, isBottomLayer = false)
assign(a, nodeData(layer, n))
tmp = ?compress.fn(a, compress.zero, key = key)
assign(nodeData(layer + 1, halfn), tmp)
merkleTreeWorker(store, offsets, compress, layer + 1, false)
proc merkleTreeWorker[H, K](
store: SharedBuf[byte],
offsets: SharedBuf[int],
compress: ptr CompressData[H, K],
signal: ThreadSignalPtr,
): bool =
defer:
discard signal.fireSync()
let res = merkleTreeWorker(
store.toOpenArray(), offsets.toOpenArray(), compress[], 0, isBottomLayer = true
)
return res.isOk()
func prepare*[H, K](
self: MerkleTree[H, K], compressor: CompressFn, zero: H, leaves: openArray[H]
): ?!void =
## Prepare the instance for computing the merkle tree of the given leaves using
## the given compression function. After preparation, `compute` should be
## called to perform the actual computation. `leaves` will be copied into the
## tree so they can be freed after the call.
if leaves.len == 0:
return failure "No leaves"
self.compress =
CompressData[H, K](fn: compressor, nodeSize: leaves[0].len, zero: zero)
self.layerOffsets = layerOffsets(leaves.len)
self.store = newSeqUninit[byte]((self.layerOffsets[^1] + 1) * self.compress.nodeSize)
for j in 0 ..< leaves.len:
assign(self[].nodeData(0, j), leaves[j])
return success()
proc compute*[H, K](self: MerkleTree[H, K]): ?!void =
merkleTreeWorker(
self.store, self.layerOffsets, self.compress, 0, isBottomLayer = true
)
proc compute*[H, K](
self: MerkleTree[H, K], tp: Taskpool
): Future[?!void] {.async: (raises: []).} =
if tp.numThreads == 1:
# With a single thread, there's no point creating a separate task
return self.compute()
# TODO this signal would benefit from reuse across computations
without signal =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
signal.close().expect("closing once works")
let res = tp.spawn merkleTreeWorker(
SharedBuf.view(self.store),
SharedBuf.view(self.layerOffsets),
addr self.compress,
signal,
)
# To support cancellation, we'd have to ensure the task we posted to taskpools
# exits early - since we're not doing that, block cancellation attempts
try:
await noCancel signal.wait()
except AsyncError as exc:
# Since we initialized the signal, the OS or chronos is misbehaving. In any
# case, it would mean the task is still running, which would cause a memory
# violation if we let it run - panic instead
raiseAssert "Could not wait for signal, was it initialized? " & exc.msg
if not res.sync():
return failure("merkle tree task failed")
return success()
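For reference, here is the spawn-and-signal pattern in isolation - a minimal sketch assuming the taskpools and chronos/threadsync APIs used above, with hypothetical job/offload names:
import pkg/[chronos, taskpools, results]
import pkg/chronos/threadsync

proc job(n: int, signal: ThreadSignalPtr): int =
  defer: discard signal.fireSync() # wake the event loop when done
  var acc = 0
  for i in 1 .. n:
    acc += i
  acc

proc offload(tp: Taskpool, n: int): Future[int] {.async.} =
  let signal = ThreadSignalPtr.new().expect("signal created")
  defer: signal.close().expect("signal closed")
  let flow = tp.spawn job(n, signal)
  await signal.wait() # suspends this task, not the thread
  return flow.sync() # the signal already fired, so this does not block

let tp = Taskpool.new(numThreads = 2)
echo waitFor offload(tp, 10) # 55
tp.shutdown()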

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,9 +9,11 @@
{.push raises: [].}
import std/sequtils
import std/[sequtils, atomics]
import pkg/poseidon2
import pkg/taskpools
import pkg/chronos/threadsync
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results
@ -44,6 +46,17 @@ type
Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]
proc len*(v: Poseidon2Hash): int =
sizeof(v)
proc assign*(v: var openArray[byte], h: Poseidon2Hash) =
doAssert v.len == sizeof(h)
copyMem(addr v[0], addr h, sizeof(h))
proc assign*(h: var Poseidon2Hash, v: openArray[byte]) =
doAssert v.len == sizeof(h)
copyMem(addr h, addr v[0], sizeof(h))
proc `$`*(self: Poseidon2Tree): string =
let root = if self.root.isOk: self.root.get.toHex else: "none"
"Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
@ -63,7 +76,7 @@ converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
proc initTree(leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
if leaves.len == 0:
return failure "Empty leaves"
@ -72,34 +85,43 @@ func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
var self = Poseidon2Tree()
?self.prepare(compressor, Poseidon2Zero, leaves)
success self
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
let self = ?initTree(leaves)
?self.compute()
success self
proc init*(
_: type Poseidon2Tree, tp: Taskpool, leaves: seq[Poseidon2Hash]
): Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} =
let self = ?initTree(leaves)
?await self.compute(tp)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))
proc init*(
_: type Poseidon2Tree, tp: Taskpool, leaves: seq[array[31, byte]]
): Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} =
await Poseidon2Tree.init(tp, leaves.mapIt(Poseidon2Hash.fromBytes(it)))
proc fromNodes*(
_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
if nodes.len == 0:
return failure "Empty nodes"
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
self = Poseidon2Tree(compress: compressor, zero: zero)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let self = Poseidon2Tree()
?self.fromNodes(compressor, Poseidon2Zero, nodes, nleaves)
let
index = Rng.instance.rand(nleaves - 1)

11
codex/multicodec_exts.nim Normal file
View File

@ -0,0 +1,11 @@
const CodecExts = [
("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress]
("codex-manifest", 0xCD01),
("codex-block", 0xCD02),
("codex-root", 0xCD03),
("codex-slot-root", 0xCD04),
("codex-proving-root", 0xCD05),
("codex-slot-cell", 0xCD06),
]

40
codex/multihash_exts.nim Normal file
View File

@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2
proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/logos-storage/logos-storage-nim/issues/1162
if len(output) > 0:
let digest = hashes.sha256.hash(data)
copyMem(addr output[0], addr digest[0], 32)
proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.Sponge.digest(data).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
const Sha2256MultiHash* = MHash(
mcodec: multiCodec("sha2-256"),
size: sha256.sizeDigest,
coder: sha2_256hash_constantine,
)
const HashExts = [
# override sha2-256 hash function
Sha2256MultiHash,
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
size: 32,
coder: poseidon2_sponge_rate2,
),
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
size: 32,
coder: poseidon2_merkle_2kb_sponge,
),
]
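A quick sketch of exercising one of these coders directly (an assumption: MHash's coder field is callable as below, with the output sized per the codec):
var digest = newSeq[byte](Sha2256MultiHash.size)
Sha2256MultiHash.coder([byte 1, 2, 3], digest)
# digest now holds sha256(0x010203), computed via constantine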

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -10,10 +10,10 @@
import
std/[options, os, strutils, times, net, atomics],
stew/shims/net as stewNet,
stew/[objects, results],
stew/[objects],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net
json_serialization/std/net,
results
import pkg/chronos
import pkg/chronicles

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -44,7 +44,7 @@ import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
import ./utils/asynciter
import ./utils/safeasynciter
import ./utils/trackedfutures
export logutils
@ -52,7 +52,10 @@ export logutils
logScope:
topics = "codex node"
const DefaultFetchBatch = 10
const
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type
Contracts* =
@ -72,15 +75,15 @@ type
contracts*: Contracts
clock*: Clock
storage*: Contracts
taskpool: Taskpool
taskPool: Taskpool
trackedFutures: TrackedFutures
CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
BatchProc* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: CodexNodeRef): Switch =
return self.switch
@ -186,34 +189,62 @@ proc fetchBatched*(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
while not iter.finished:
let blockFutures = collect:
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
# Sliding window: maintain batchSize blocks in-flight
let
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
refillSize = max(refillThreshold, 1)
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
if blockFutures.len == 0:
var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
var blockResults = await self.networkStore.getBlocks(addresses)
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue
without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err:
trace "Some blocks failed to fetch", err = err.msg
return failure(err)
inc(successfulBlocks)
inc(completedInWindow)
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value)
if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
let numOfFailedBlocks = blockResults.len - blocks.len
if numOfFailedBlocks > 0:
return
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")")
if completedInWindow >= refillThreshold and not iter.finished:
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
for i in 0 ..< refillSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption:
if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
if not iter.finished:
await sleepAsync(1.millis)
success()
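With the new defaults, the window arithmetic works out as follows (a standalone check, not part of the diff):
let
  batchSize = 1024 # DefaultFetchBatch
  refillThreshold = int(float(batchSize) * 0.75) # BatchRefillThreshold
  refillSize = max(refillThreshold, 1)
  maxCallbackBlocks = min(batchSize, 128) # MaxOnBatchBlocks
# 768 completions trigger queuing up to 768 fresh addresses, while the
# onBatch callback sees at most 128 blocks at a time.
assert refillThreshold == 768 and refillSize == 768 and maxCallbackBlocks == 128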
proc fetchBatched*(
@ -294,7 +325,7 @@ proc streamEntireDataset(
try:
# Spawn an erasure decoding job
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskPool
)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
@ -403,6 +434,7 @@ proc store*(
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize,
onBlockStored: OnBlockStoredProc = nil,
): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest
@ -432,6 +464,9 @@ proc store*(
if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
if not onBlockStored.isNil:
onBlockStored(chunk)
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -439,7 +474,7 @@ proc store*(
finally:
await stream.close()
without tree =? CodexTree.init(cids), err:
without tree =? (await CodexTree.init(self.taskPool, cids)), err:
return failure(err)
without treeCid =? tree.rootCid(CIDv1, dataCodec), err:
@ -533,14 +568,15 @@ proc setupRequest(
# Erasure code the dataset according to provided parameters
let erasure = Erasure.new(
self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskPool
)
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
return failure(error)
without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err:
without builder =?
Poseidon2Builder.new(self.networkStore.localStore, encoded, self.taskPool), err:
trace "Unable to create slot builder"
return failure(err)
@ -644,7 +680,9 @@ proc onStore(
return failure(err)
without builder =?
Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
Poseidon2Builder.new(
self.networkStore, manifest, self.taskPool, manifest.verifiableStrategy
), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)
@ -679,7 +717,7 @@ proc onStore(
trace "start repairing slot", slotIdx
try:
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskPool
)
if err =? (await erasure.repair(manifest)).errorOption:
error "Unable to erasure decode repairing manifest",
@ -846,7 +884,7 @@ proc start*(self: CodexNodeRef) {.async.} =
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs
notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node"
@ -871,6 +909,7 @@ proc stop*(self: CodexNodeRef) {.async.} =
if not self.clock.isNil:
await self.clock.stop()
proc close*(self: CodexNodeRef) {.async.} =
if not self.networkStore.isNil:
await self.networkStore.close
@ -880,7 +919,7 @@ proc new*(
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
taskpool: Taskpool,
taskPool: Taskpool,
prover = Prover.none,
contracts = Contracts.default,
): CodexNodeRef =
@ -893,7 +932,7 @@ proc new*(
engine: engine,
prover: prover,
discovery: discovery,
taskPool: taskpool,
taskPool: taskPool,
contracts: contracts,
trackedFutures: TrackedFutures(),
)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sequtils
import std/mimetypes
@ -183,7 +180,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin # prevents capture inside of api definition
router.api(MethodOptions, "/api/codex/v1/data") do(
router.api(MethodOptions, "/api/storage/v1/data") do(
resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -195,7 +192,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse:
router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner
##
@ -257,11 +254,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
finally:
await reader.closeWait()
router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse:
let json = await formatManifestBlocks(node)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodOptions, "/api/codex/v1/data/{cid}") do(
router.api(MethodOptions, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -270,7 +267,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.api(MethodGet, "/api/codex/v1/data/{cid}") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -286,7 +283,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
await node.retrieveCid(cid.get(), local = true, resp = resp)
router.api(MethodDelete, "/api/codex/v1/data/{cid}") do(
router.api(MethodDelete, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Deletes either a single block or an entire dataset
@ -307,7 +304,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do(
router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network to the local node
@ -328,7 +325,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network in a streaming
@ -347,7 +344,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download only the manifest.
@ -365,7 +362,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/data/{cid}/exists") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Only test if the given CID is available in the local store
@ -381,7 +378,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %*{$cid: hasCid}
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
let json =
%RestRepoStore(
totalBlocks: repoStore.totalBlocks,
@ -394,7 +391,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
## Returns active slots for the host
@ -412,7 +409,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
slotId: SlotId
) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the
@ -442,7 +439,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
restAgent.toJson, contentType = "application/json", headers = headers
)
router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -464,7 +461,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is
## returned to the availability.
@ -544,7 +541,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do(
router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -553,7 +550,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do(
router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId
) -> RestApiResponse:
## Updates Availability.
@ -641,7 +638,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500)
router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
id: AvailabilityId
) -> RestApiResponse:
## Gets Availability's reservations.
@ -685,7 +682,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
cid: Cid
) -> RestApiResponse:
var headers = buildCorsHeaders("POST", allowedOrigin)
@ -795,7 +792,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
id: PurchaseId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -827,7 +824,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
@ -849,7 +846,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
## various node management api's
##
router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse:
## Returns node SPR in requested format, json or text.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -872,7 +869,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse:
## Returns node's peerId in requested format, json or text.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -891,7 +888,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do(
router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do(
peerId: PeerId, addrs: seq[MultiAddress]
) -> RestApiResponse:
## Connect to a peer
@ -929,7 +926,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse:
## Print rudimentary node information
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -949,7 +946,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
"",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
"codex": {
"storage": {
"version": $codexVersion,
"revision": $codexRevision,
"contracts": $codexContractsRevision,
@ -964,7 +961,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do(
router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do(
level: Option[string]
) -> RestApiResponse:
## Set log level at run time
@ -990,8 +987,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
when codex_enable_api_debug_peers:
router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do(
when storage_enable_api_debug_peers:
router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do(
peerId: PeerId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/crypto/crypto
import pkg/bearssl/rand

View File

@ -22,7 +22,7 @@ import ./utils/exceptions
## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the
## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -27,9 +27,7 @@
## | UInt256 | totalRemainingCollateral | |
## +---------------------------------------------------+
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sequtils
import std/sugar
@ -38,7 +36,6 @@ import std/sequtils
import std/times
import pkg/chronos
import pkg/datastore
import pkg/nimcrypto
import pkg/questionable
import pkg/questionable/results
import pkg/stint
@ -55,6 +52,8 @@ import ../units
export requests
export logutils
from nimcrypto import randomBytes
logScope:
topics = "marketplace sales reservations"
@ -92,14 +91,10 @@ type
repo: RepoStore
OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.
upraises: [], gcsafe, async: (raises: [CancelledError]), closure
.}
IterDispose* =
proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
upraises: [], gcsafe, async: (raises: [])
.}
GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.async: (raises: []).}
StorableIter* = ref object
finished*: bool
next*: GetNext

View File

@ -2,7 +2,6 @@ import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/upraises
import ../contracts/requests
import ../errors
import ../logutils
@ -113,14 +112,12 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
method onFulfilled*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
) {.base, gcsafe, raises: [].} =
let cancelled = agent.data.cancelled
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
cancelled.cancelSoon()
method onFailed*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
without request =? agent.data.request:
return
if agent.data.requestId == requestId:
@ -128,7 +125,7 @@ method onFailed*(
method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, upraises: [].} =
) {.base, gcsafe, raises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex))

View File

@ -1,6 +1,5 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import pkg/libp2p/cid
import ../market
@ -24,21 +23,20 @@ type
slotQueue*: SlotQueue
simulateProofFailures*: int
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
.}
BlocksCb* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnStore* = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
blocksCb: BlocksCb,
isRepairing: bool,
): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).}
): Future[?!void] {.async: (raises: [CancelledError]).}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, async: (raises: [CancelledError])
async: (raises: [CancelledError])
.}
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
gcsafe, async: (raises: [CancelledError])
async: (raises: [CancelledError])
.}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
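
For reference, a handler matching the slimmed-down OnStore signature could be written as below (a no-op sketch; the body is illustrative and the module's types are assumed to be in scope):

let onStore: OnStore = proc(
    request: StorageRequest,
    expiry: SecondsSince1970,
    slot: uint64,
    blocksCb: BlocksCb,
    isRepairing: bool,
): Future[?!void] {.async: (raises: [CancelledError]).} =
  # accept the request without persisting anything
  success()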

View File

@ -15,8 +15,7 @@ logScope:
topics = "marketplace slotqueue"
type
OnProcessSlot* =
proc(item: SlotQueueItem): Future[void] {.gcsafe, async: (raises: []).}
OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}
# Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg

View File

@ -1,5 +1,4 @@
import pkg/questionable
import pkg/upraises
import ../errors
import ../utils/asyncstatemachine
import ../market
@ -16,17 +15,17 @@ type
method onCancelled*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
method onFailed*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
proc cancelledEvent*(request: StorageRequest): Event =

View File

@ -1,6 +1,5 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../statemachine
import ../salesagent

View File

@ -11,7 +11,7 @@ import ./cancelled
import ./failed
import ./proving
when codex_enable_proof_failures:
when storage_enable_proof_failures:
import ./provingsimulated
logScope:
@ -59,7 +59,7 @@ method run*(
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))
when codex_enable_proof_failures:
when storage_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(

View File

@ -61,7 +61,7 @@ method run*(
return some State(SaleIgnored(reprocessSlot: false))
# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)
# due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)
logScope:
slotIndex = data.slotIndex

View File

@ -1,5 +1,5 @@
import ../../conf
when codex_enable_proof_failures:
when storage_enable_proof_failures:
import std/strutils
import pkg/stint
import pkg/ethers
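
storage_enable_proof_failures is a compile-time constant, so the simulated-proving code is compiled out entirely unless the flag is set. Assuming it is declared with {.booldefine.} (a hypothetical sketch; the real declaration lives in ../../conf), it would be toggled at build time:

# Hypothetical declaration; see ../../conf for the actual one:
const storage_enable_proof_failures* {.booldefine.} = false

# Enable when building, e.g.:
#   nim c -d:storage_enable_proof_failures=true <...>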

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -18,18 +18,20 @@ import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/constantine/math/io/io_fields
import pkg/taskpools
import ../../logutils
import ../../utils
import ../../stores
import ../../manifest
import ../../merkletree
import ../../utils/poseidon2digest
import ../../utils/asynciter
import ../../indexingstrategy
import ../converters
export converters, asynciter
export converters, asynciter, poseidon2digest
logScope:
topics = "codex slotsbuilder"
@ -45,6 +47,7 @@ type SlotsBuilder*[T, H] = ref object of RootObj
emptyBlock: seq[byte] # empty block
verifiableTree: ?T # verification tree (dataset tree)
emptyDigestTree: T # empty digest tree for empty blocks
taskPool: Taskpool
func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} =
## Returns true if the slots are verifiable.
@ -165,6 +168,35 @@ proc buildBlockTree*[T, H](
success (blk.data, tree)
proc getBlockDigest*[T, H](
self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
): Future[?!H] {.async: (raises: [CancelledError]).} =
logScope:
blkIdx = blkIdx
slotPos = slotPos
numSlotBlocks = self.manifest.numSlotBlocks
cellSize = self.cellSize
trace "Building block tree"
if slotPos > (self.manifest.numSlotBlocks - 1):
# pad blocks are 0 byte blocks
trace "Returning empty digest tree for pad block"
return self.emptyDigestTree.root
without blk =? await self.store.getBlock(self.manifest.treeCid, blkIdx), err:
error "Failed to get block CID for tree at index", err = err.msg
return failure(err)
if blk.isEmpty:
return self.emptyDigestTree.root
without dg =? (await T.digest(self.taskPool, blk.data, self.cellSize.int)), err:
error "Failed to create digest for block", err = err.msg
return failure(err)
return success dg
proc getCellHashes*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} =
@ -190,8 +222,7 @@ proc getCellHashes*[T, H](
pos = i
trace "Getting block CID for tree at index"
without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root,
err:
without digest =? (await self.getBlockDigest(blkIdx, i)), err:
error "Failed to get block CID for tree at index", err = err.msg
return failure(err)
@ -310,6 +341,7 @@ proc new*[T, H](
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
taskPool: Taskpool,
strategy = LinearStrategy,
cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] =
@ -383,6 +415,7 @@ proc new*[T, H](
emptyBlock: emptyBlock,
numSlotBlocks: numSlotBlocksTotal,
emptyDigestTree: emptyDigestTree,
taskPool: taskPool,
)
if manifest.verifiable:
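
SlotsBuilder.new now threads a Taskpool through so that block digests (getBlockDigest above) can run on worker threads. A hedged construction sketch, assuming Poseidon2Builder is the concrete SlotsBuilder instantiation and that store and manifest come from the caller:

import std/cpuinfo
import pkg/taskpools

let taskPool = Taskpool.new(numThreads = countProcessors())
without builder =? Poseidon2Builder.new(store, manifest, taskPool), err:
  error "Unable to create slots builder", err = err.msg
  return failure(err)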

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -13,6 +13,7 @@ import pkg/chronicles
import pkg/circomcompat
import pkg/poseidon2
import pkg/questionable/results
import pkg/taskpools
import pkg/libp2p/cid
@ -47,6 +48,7 @@ type
backend: AnyBackend
store: BlockStore
nSamples: int
taskPool: Taskpool
proc prove*(
self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge
@ -61,7 +63,7 @@ proc prove*(
trace "Received proof challenge"
without builder =? AnyBuilder.new(self.store, manifest), err:
without builder =? AnyBuilder.new(self.store, manifest, self.taskPool), err:
error "Unable to create slots builder", err = err.msg
return failure(err)
@ -88,6 +90,6 @@ proc verify*(
self.backend.verify(proof, inputs)
proc new*(
_: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int
_: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int, tp: Taskpool
): Prover =
Prover(store: store, backend: backend, nSamples: nSamples)
Prover(store: store, backend: backend, nSamples: nSamples, taskPool: tp)
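
The prover constructor grows a matching parameter so builder and prover can share one pool. A wiring sketch, assuming store, backend, nSamples and tp come from node setup:

let prover = Prover.new(store, backend, nSamples, tp)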

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -29,7 +29,7 @@ type
Block
Both
CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, async: (raises: []).}
CidCallback* = proc(cid: Cid): Future[void] {.async: (raises: []).}
BlockStore* = ref object of RootObj
onBlockStored*: ?CidCallback
@ -70,6 +70,14 @@ method completeBlock*(
) {.base, gcsafe.} =
discard
method getBlocks*(
self: BlockStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
## Gets a set of blocks from the blockstore. Blocks might
## be returned in any order.
raiseAssert("getBlocks not implemented!")
method getBlockAndProof*(
self: BlockStore, treeCid: Cid, index: Natural
): Future[?!(Block, CodexProof)] {.base, async: (raises: [CancelledError]), gcsafe.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -66,6 +66,21 @@ method getBlock*(
trace "Error requesting block from cache", cid, error = exc.msg
return failure exc
method getBlocks*(
self: CacheStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var i = 0
proc isFinished(): bool =
i == addresses.len
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
let value = await self.getBlock(addresses[i])
inc(i)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
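
A hedged consumption sketch for the new getBlocks API, assuming that iterating a SafeAsyncIter yields futures of ?!Block, mirroring what genNext above produces; store and addresses are assumed to be in scope:

let blocks = await store.getBlocks(addresses)
for blkFut in blocks:
  without blk =? (await blkFut), err:
    error "Block fetch failed", err = err.msg
    continue
  trace "Fetched block", cid = blk.cid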
method getCidAndProof*(
self: CacheStore, treeCid: Cid, index: Natural
): Future[?!(Cid, CodexProof)] {.async: (raises: [CancelledError]).} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sugar
import pkg/questionable/results

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -31,6 +31,31 @@ type NetworkStore* = ref object of BlockStore
engine*: BlockExcEngine # blockexc decision engine
localStore*: BlockStore # local block store
method getBlocks*(
self: NetworkStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var
localAddresses: seq[BlockAddress]
remoteAddresses: seq[BlockAddress]
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for address in addresses:
if not (await address in self.localStore):
remoteAddresses.add(address)
else:
localAddresses.add(address)
if (Moment.now() - lastIdle) >= runtimeQuota:
await idleAsync()
lastIdle = Moment.now()
return chain(
await self.localStore.getBlocks(localAddresses),
self.engine.requestBlocks(remoteAddresses),
)
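
The partitioning loop above bounds how long it can monopolise the event loop: after every 10 ms of wall time it parks itself with chronos' idleAsync(). The pattern in isolation (process is a placeholder for per-item work):

proc processAll(items: seq[BlockAddress]) {.async: (raises: [CancelledError]).} =
  let runtimeQuota = 10.milliseconds
  var lastIdle = Moment.now()
  for item in items:
    process(item) # placeholder for per-item work
    if (Moment.now() - lastIdle) >= runtimeQuota:
      await idleAsync() # yield so other async tasks can run
      lastIdle = Moment.now()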
method getBlock*(
self: NetworkStore, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -38,6 +38,21 @@ logScope:
# BlockStore API
###########################################################
method getBlocks*(
self: RepoStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var i = 0
proc isFinished(): bool =
i == addresses.len
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
let value = await self.getBlock(addresses[i])
inc(i)
return value
return SafeAsyncIter[Block].new(genNext, isFinished)
method getBlock*(
self: RepoStore, cid: Cid
): Future[?!Block] {.async: (raises: [CancelledError]).} =
@ -428,7 +443,6 @@ proc start*(
): Future[void] {.async: (raises: [CancelledError, CodexError]).} =
## Start repo
##
if self.started:
trace "Repo already started"
return
@ -450,6 +464,5 @@ proc stop*(self: RepoStore): Future[void] {.async: (raises: []).} =
return
trace "Stopping repo"
await self.close()
self.started = false

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sugar
import pkg/chronos

View File

@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/chronos
import pkg/libp2p

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
import std/options
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/chronos
import pkg/stew/ptrops

View File

@ -1,9 +1,8 @@
import std/times
import pkg/upraises
import ./clock
type SystemClock* = ref object of Clock
method now*(clock: SystemClock): SecondsSince1970 {.upraises: [].} =
method now*(clock: SystemClock): SecondsSince1970 {.raises: [].} =
let now = times.now().utc
now.toTime().toUnix()

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,15 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/net
import std/strutils
import std/options
import pkg/libp2p
import pkg/stew/shims/net
import pkg/stew/endians2
func remapAddr*(

Some files were not shown because too many files have changed in this diff.