Mirror of https://github.com/logos-storage/logos-storage-nim.git
Synced 2026-01-03 22:13:12 +00:00

Merge ac5e3c733ba3b02ae1919b3879cdea842c795932 into 20b6d76b4203c1e76a1c10f05425dea7112b9af8
Commit ec5a8cdfed

1 .github/workflows/ci.yml (vendored)

@@ -16,6 +16,7 @@ concurrency:
  cancel-in-progress: true

jobs:

  matrix:
    runs-on: ubuntu-latest
    outputs:

19 .github/workflows/conventional-commits.yml (vendored, new file)

@@ -0,0 +1,19 @@
name: Conventional Commits Linting

on:
  push:
    branches:
      - master
  pull_request:
  workflow_dispatch:
  merge_group:

jobs:
  pr-title:
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    steps:
      - name: PR Conventional Commit Validation
        uses: ytanikin/pr-conventional-commits@1.4.1
        with:
          task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'

175 .github/workflows/deploy-devnet.yml (vendored, new file)

@ -0,0 +1,175 @@
|
||||
name: Deploy - Devnet
|
||||
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
codex_image:
|
||||
description: codexstorage/nim-codex:latest-dist-tests
|
||||
required: false
|
||||
type: string
|
||||
workflow_call:
|
||||
inputs:
|
||||
codex_image:
|
||||
description: codexstorage/nim-codex:latest-dist-tests
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
CODEX_NAMESPACE: codex
|
||||
TOOLS_NAMESPACE: common
|
||||
KUBE_CONFIG: ${{ secrets.DEVNET_KUBE_CONFIG }}
|
||||
KUBE_VERSION: v1.33.1
|
||||
CODEX_IMAGE: ${{ inputs.codex_image }}
|
||||
SSH_HOSTS: ${{ secrets.DEVNET_SSH_HOSTS }}
|
||||
SSH_PORT: ${{ secrets.DEVNET_SSH_PORT }}
|
||||
SSH_USERNAME: ${{ secrets.DEVNET_SSH_USERNAME }}
|
||||
SSH_PRIVATE_KEY: ${{ secrets.DEVNET_SSH_KEY }}
|
||||
|
||||
|
||||
jobs:
|
||||
deploy-contracts:
|
||||
name: Deploy contracts
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create access token
|
||||
uses: actions/create-github-app-token@v2
|
||||
id: app-token
|
||||
with:
|
||||
app-id: ${{ secrets.DEPLOYER_APP_ID }}
|
||||
private-key: ${{ secrets.DEPLOYER_PRIVATE_KEY }}
|
||||
repositories: codex-contracts-eth
|
||||
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Get contracts submodule ref
|
||||
id: contracts
|
||||
run: echo "ref=$(git rev-parse HEAD:vendor/codex-contracts-eth)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Deploy smart contracts
|
||||
uses: the-actions-org/workflow-dispatch@v4
|
||||
with:
|
||||
repo: codex-storage/codex-contracts-eth
|
||||
workflow: devnet-contracts.yml
|
||||
token: ${{ steps.app-token.outputs.token }}
|
||||
wait-for-completion-timeout: 20m
|
||||
wait-for-completion-interval: 20s
|
||||
inputs: '{ "network": "codex_devnet", "contracts_ref": "${{ steps.contracts.outputs.ref }}" }'
|
||||
|
||||
|
||||
bootstrap-nodes:
|
||||
name: Bootstrap nodes
|
||||
runs-on: ubuntu-latest
|
||||
needs: deploy-contracts
|
||||
steps:
|
||||
- name: Codex Bootstrap - Update
|
||||
uses: appleboy/ssh-action@v1
|
||||
with:
|
||||
host: ${{ secrets.DEVNET_SSH_HOSTS }}
|
||||
username: ${{ secrets.DEVNET_SSH_USERNAME }}
|
||||
key: ${{ secrets.DEVNET_SSH_KEY }}
|
||||
port: ${{ secrets.DEVNET_SSH_PORT }}
|
||||
script: /opt/codex/remote-deploy.sh ${{ env.CODEX_IMAGE }}
|
||||
|
||||
cluster-nodes:
|
||||
name: Cluster nodes
|
||||
runs-on: ubuntu-latest
|
||||
needs: bootstrap-nodes
|
||||
steps:
|
||||
- name: Kubectl - Install ${{ env.KUBE_VERSION }}
|
||||
uses: azure/setup-kubectl@v4
|
||||
with:
|
||||
version: ${{ env.KUBE_VERSION }}
|
||||
|
||||
- name: Kubectl - Kubeconfig
|
||||
run: |
|
||||
mkdir -p "${HOME}"/.kube
|
||||
echo "${{ env.KUBE_CONFIG }}" | base64 -d > "${HOME}"/.kube/config
|
||||
|
||||
- name: Codex Storage - Update
|
||||
run: |
|
||||
for node in {1..5}; do
|
||||
kubectl -n "${{ env.CODEX_NAMESPACE }}" patch statefulset codex-storage-${node} \
|
||||
--patch '{"spec": {"template": {"spec":{"containers":[{"name": "codex", "image":"${{ env.CODEX_IMAGE }}"}]}}}}'
|
||||
done
|
||||
|
||||
- name: Codex Validators - Update
|
||||
run: |
|
||||
for node in {1..1}; do
|
||||
kubectl -n "${{ env.CODEX_NAMESPACE }}" patch statefulset codex-validator-${node} \
|
||||
--patch '{"spec": {"template": {"spec":{"containers":[{"name": "codex", "image":"${{ env.CODEX_IMAGE }}"}]}}}}'
|
||||
done
|
||||
|
||||
- name: Codex Storage - Status
|
||||
run: |
|
||||
WAIT=300
|
||||
SECONDS=0
|
||||
sleep=1
|
||||
for instance in {1..5}; do
|
||||
while (( SECONDS < WAIT )); do
|
||||
pod=codex-storage-${instance}-1
|
||||
phase=$(kubectl get pod "${pod}" -n "${{ env.CODEX_NAMESPACE }}" -o jsonpath='{.status.phase}')
|
||||
if [[ "${phase}" == "Running" ]]; then
|
||||
echo "Pod ${pod} is in the ${phase} state"
|
||||
break
|
||||
else
|
||||
echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))"
|
||||
fi
|
||||
sleep "${sleep}"
|
||||
done
|
||||
done
|
||||
|
||||
- name: Codex Validators - Status
|
||||
run: |
|
||||
WAIT=300
|
||||
SECONDS=0
|
||||
sleep=1
|
||||
for instance in {1..1}; do
|
||||
while (( SECONDS < WAIT )); do
|
||||
pod=codex-validator-${instance}-1
|
||||
phase=$(kubectl get pod "${pod}" -n "${{ env.CODEX_NAMESPACE }}" -o jsonpath='{.status.phase}')
|
||||
if [[ "${phase}" == "Running" ]]; then
|
||||
echo "Pod ${pod} is in the ${phase} state"
|
||||
break
|
||||
else
|
||||
echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))"
|
||||
fi
|
||||
sleep "${sleep}"
|
||||
done
|
||||
done
|
||||
|
||||
- name: Tools - Update
|
||||
run: |
|
||||
crawler_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app.kubernetes.io/name=crawler' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
|
||||
discordbot_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app=discordbot' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
|
||||
|
||||
for pod in "${crawler_pod}" "${discordbot_pod}"; do
|
||||
if [[ -n "${pod}" ]]; then
|
||||
kubectl delete pod -n "${{ env.TOOLS_NAMESPACE }}" "${pod}" --grace-period=10
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Tools - Status
|
||||
run: |
|
||||
WAIT=300
|
||||
SECONDS=0
|
||||
sleep=1
|
||||
crawler_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app.kubernetes.io/name=crawler' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
|
||||
discordbot_pod=$(kubectl get pod -n "${{ env.TOOLS_NAMESPACE }}" -l 'app=discordbot' -ojsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
|
||||
for pod in "${crawler_pod}" "${discordbot_pod}"; do
|
||||
if [[ -n "${pod}" ]]; then
|
||||
while (( SECONDS < WAIT )); do
|
||||
phase=$(kubectl get pod "${pod}" -n "${{ env.TOOLS_NAMESPACE }}" -o jsonpath='{.status.phase}')
|
||||
if [[ "${phase}" == "Running" ]]; then
|
||||
echo "Pod ${pod} is in the ${phase} state"
|
||||
break
|
||||
else
|
||||
echo "Pod ${pod} is in the ${phase} state - Check in ${sleep} second(s) / $((WAIT - SECONDS))"
|
||||
fi
|
||||
sleep "${sleep}"
|
||||
done
|
||||
fi
|
||||
done
|
||||
16 .github/workflows/docker-dist-tests.yml (vendored)
@ -13,6 +13,7 @@ on:
|
||||
- '.github/**'
|
||||
- '!.github/workflows/docker-dist-tests.yml'
|
||||
- '!.github/workflows/docker-reusable.yml'
|
||||
- '!.github/workflows/deploy-devnet.yml'
|
||||
- 'docker/**'
|
||||
- '!docker/codex.Dockerfile'
|
||||
- '!docker/docker-entrypoint.sh'
|
||||
@ -23,6 +24,11 @@ on:
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
deploy_devnet:
|
||||
description: Deploy Devnet
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
|
||||
jobs:
|
||||
@ -40,6 +46,7 @@ jobs:
|
||||
run: |
|
||||
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
|
||||
echo "hash=$hash" >> $GITHUB_OUTPUT
|
||||
|
||||
build-and-push:
|
||||
name: Build and Push
|
||||
uses: ./.github/workflows/docker-reusable.yml
|
||||
@ -53,3 +60,12 @@ jobs:
|
||||
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
|
||||
run_release_tests: ${{ inputs.run_release_tests }}
|
||||
secrets: inherit
|
||||
|
||||
deploy-devnet:
|
||||
name: Deploy Devnet
|
||||
uses: ./.github/workflows/deploy-devnet.yml
|
||||
needs: build-and-push
|
||||
if: ${{ inputs.deploy_devnet || github.event_name == 'push' && github.ref_name == github.event.repository.default_branch }}
|
||||
with:
|
||||
codex_image: ${{ needs.build-and-push.outputs.codex_image }}
|
||||
secrets: inherit
|
||||
|
||||
50 .github/workflows/docker-reusable.yml (vendored)
@ -68,6 +68,10 @@ on:
|
||||
description: Specifies compatible smart contract image
|
||||
required: false
|
||||
type: string
|
||||
outputs:
|
||||
codex_image:
|
||||
description: Codex Docker image tag
|
||||
value: ${{ jobs.publish.outputs.codex_image }}
|
||||
|
||||
|
||||
env:
|
||||
@ -91,15 +95,16 @@ env:
|
||||
|
||||
|
||||
jobs:
|
||||
# Compute variables
|
||||
compute:
|
||||
name: Compute build ID
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build_id: ${{ steps.build_id.outputs.build_id }}
|
||||
steps:
|
||||
- name: Generate unique build id
|
||||
id: build_id
|
||||
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
|
||||
name: Compute build ID
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build_id: ${{ steps.build_id.outputs.build_id }}
|
||||
steps:
|
||||
- name: Generate unique build id
|
||||
id: build_id
|
||||
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
|
||||
|
||||
# Build platform specific image
|
||||
build:
|
||||
@ -134,7 +139,7 @@ jobs:
|
||||
run: |
|
||||
# Create contract label for compatible contract image if specified
|
||||
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
|
||||
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
|
||||
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Docker - Meta
|
||||
@ -189,35 +194,35 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.meta.outputs.version }}
|
||||
codex_image: ${{ steps.image_tag.outputs.codex_image }}
|
||||
needs: [build, compute]
|
||||
steps:
|
||||
|
||||
- name: Docker - Variables
|
||||
run: |
|
||||
# Adjust custom suffix when set and
|
||||
# Adjust custom suffix when set
|
||||
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
|
||||
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
|
||||
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
|
||||
fi
|
||||
# Disable SHA tags on tagged release
|
||||
if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
|
||||
echo "TAG_SHA=false" >>$GITHUB_ENV
|
||||
echo "TAG_SHA=false" >> $GITHUB_ENV
|
||||
fi
|
||||
# Handle latest and latest-custom using raw
|
||||
if [[ ${{ env.TAG_SHA }} == "false" ]]; then
|
||||
echo "TAG_LATEST=false" >>$GITHUB_ENV
|
||||
echo "TAG_RAW=true" >>$GITHUB_ENV
|
||||
echo "TAG_LATEST=false" >> $GITHUB_ENV
|
||||
echo "TAG_RAW=true" >> $GITHUB_ENV
|
||||
if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
|
||||
echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
|
||||
echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
|
||||
else
|
||||
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
|
||||
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
|
||||
fi
|
||||
else
|
||||
echo "TAG_RAW=false" >>$GITHUB_ENV
|
||||
echo "TAG_RAW=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
# Create contract label for compatible contract image if specified
|
||||
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
|
||||
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
|
||||
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Docker - Download digests
|
||||
@ -257,9 +262,12 @@ jobs:
|
||||
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
|
||||
|
||||
- name: Docker - Image tag
|
||||
id: image_tag
|
||||
run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Docker - Inspect image
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
|
||||
run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
|
||||
|
||||
|
||||
# Compute Tests inputs
|
||||
|
||||
2 .github/workflows/nim-matrix.yml (vendored)

@@ -8,7 +8,7 @@ env:
  cache_nonce: 0 # Allows for easily busting actions/cache caches
  nim_version: pinned

jobs:
jobs:
  matrix:
    runs-on: ubuntu-latest
    outputs:

2 .github/workflows/release.yml (vendored)
@ -32,7 +32,6 @@ jobs:
|
||||
matrix: |
|
||||
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
|
||||
|
||||
@ -189,6 +188,7 @@ jobs:
|
||||
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
|
||||
echo "${branch}" > "${folder}"/latest
|
||||
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
|
||||
rm -f "${folder}"/latest
|
||||
|
||||
# master branch
|
||||
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
|
||||
|
||||
19 .gitmodules (vendored)
@ -37,22 +37,17 @@
|
||||
path = vendor/nim-nitro
|
||||
url = https://github.com/status-im/nim-nitro.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = main
|
||||
[submodule "vendor/questionable"]
|
||||
path = vendor/questionable
|
||||
url = https://github.com/status-im/questionable.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/upraises"]
|
||||
path = vendor/upraises
|
||||
url = https://github.com/markspanbroek/upraises.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = main
|
||||
[submodule "vendor/asynctest"]
|
||||
path = vendor/asynctest
|
||||
url = https://github.com/status-im/asynctest.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = main
|
||||
[submodule "vendor/nim-presto"]
|
||||
path = vendor/nim-presto
|
||||
url = https://github.com/status-im/nim-presto.git
|
||||
@ -132,7 +127,7 @@
|
||||
path = vendor/nim-websock
|
||||
url = https://github.com/status-im/nim-websock.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = main
|
||||
[submodule "vendor/nim-contract-abi"]
|
||||
path = vendor/nim-contract-abi
|
||||
url = https://github.com/status-im/nim-contract-abi
|
||||
@ -160,7 +155,7 @@
|
||||
path = vendor/nim-taskpools
|
||||
url = https://github.com/status-im/nim-taskpools.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = stable
|
||||
[submodule "vendor/nim-leopard"]
|
||||
path = vendor/nim-leopard
|
||||
url = https://github.com/status-im/nim-leopard.git
|
||||
@ -225,9 +220,9 @@
|
||||
path = vendor/nim-quic
|
||||
url = https://github.com/vacp2p/nim-quic.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = main
|
||||
[submodule "vendor/nim-ngtcp2"]
|
||||
path = vendor/nim-ngtcp2
|
||||
url = https://github.com/vacp2p/nim-ngtcp2.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
branch = main
|
||||
|
||||
29 Makefile
@ -232,6 +232,7 @@ format:
|
||||
$(NPH) *.nim
|
||||
$(NPH) codex/
|
||||
$(NPH) tests/
|
||||
$(NPH) library/
|
||||
|
||||
clean-nph:
|
||||
rm -f $(NPH)
|
||||
@ -242,4 +243,32 @@ print-nph-path:
|
||||
|
||||
clean: | clean-nph
|
||||
|
||||
################
|
||||
## C Bindings ##
|
||||
################
|
||||
.PHONY: libcodex
|
||||
|
||||
STATIC ?= 0
|
||||
|
||||
ifneq ($(strip $(CODEX_LIB_PARAMS)),)
|
||||
NIM_PARAMS := $(NIM_PARAMS) $(CODEX_LIB_PARAMS)
|
||||
endif
|
||||
|
||||
libcodex:
|
||||
$(MAKE) deps
|
||||
rm -f build/libcodex*
|
||||
|
||||
ifeq ($(STATIC), 1)
|
||||
echo -e $(BUILD_MSG) "build/$@.a" && \
|
||||
$(ENV_SCRIPT) nim libcodexStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
|
||||
else ifeq ($(detected_OS),Windows)
|
||||
echo -e $(BUILD_MSG) "build/$@.dll" && \
|
||||
$(ENV_SCRIPT) nim libcodexDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
|
||||
else ifeq ($(detected_OS),macOS)
|
||||
echo -e $(BUILD_MSG) "build/$@.dylib" && \
|
||||
$(ENV_SCRIPT) nim libcodexDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
|
||||
else
|
||||
echo -e $(BUILD_MSG) "build/$@.so" && \
|
||||
$(ENV_SCRIPT) nim libcodexDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
|
||||
endif
|
||||
endif # "variables.mk" was not included
|
||||
|
||||
50 README.md

@@ -53,6 +53,56 @@ To get acquainted with Codex, consider:

The client exposes a REST API that can be used to interact with the client. An overview of the API can be found at [api.codex.storage](https://api.codex.storage).

## Bindings

Codex provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
Currently, only a Go binding is included.
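
For orientation, the snippet below is a minimal, hypothetical sketch of how a Go program can link against the C library built in the next step using cgo. The library location (`build/`) and name (`libcodex`) come from this README; the header name and any C declarations are assumptions rather than the actual libcodex API, which is defined by the header generated during the Nim build.

```go
// Hypothetical wiring sketch: links against the build/libcodex.* artifact
// produced by `make libcodex`. No real libcodex symbols are declared or
// called here; a real wrapper includes the generated header and declares
// the exported C functions before calling them through the C pseudo-package.
package main

/*
#cgo LDFLAGS: -L${SRCDIR}/build -lcodex
// #include "libcodex.h"  // assumed header name; use the one your build generates
*/
import "C"

import "fmt"

func main() {
	// With declarations in place, calls would go through C.<functionName>(...).
	fmt.Println("linked against libcodex via cgo")
}
```

The bundled example under `examples/golang/` is the reference for how the binding is actually wired.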

### Build the C library

```bash
make libcodex
```

This produces the shared library under `build/`.

### Run the Go example

Build the Go example:

```bash
go build -o codex-go examples/golang/codex.go
```

Export the library path:

```bash
export LD_LIBRARY_PATH=build
```

Run the example:

```bash
./codex-go
```

### Static vs dynamic build

By default, Codex builds a dynamic library (`libcodex.so`), which you can load at runtime.
If you prefer a static library (`libcodex.a`), set the `STATIC` flag:

```bash
# Build dynamic (default)
make libcodex

# Build static
make STATIC=1 libcodex
```

### Limitations

Callbacks must be fast and non-blocking; otherwise, the worker thread will hang and prevent other requests from being processed.
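
A non-blocking callback typically just copies the payload, hands it to a channel or queue, and returns. The Go sketch below illustrates the pattern; the callback signature and names are simplified assumptions for illustration and do not mirror the actual binding API.

```go
// Illustrative only: the callback signature is an assumption, not the real
// binding API. The point is the pattern: copy, enqueue, return immediately,
// and let a separate goroutine do the slow work.
package main

import "fmt"

// results decouples the (fast) callback from the (slow) consumer.
var results = make(chan []byte, 64)

// onResponse is what a wrapper might register as a callback.
// It must return quickly: no I/O and no blocking channel sends.
func onResponse(payload []byte) {
	data := append([]byte(nil), payload...) // copy; the caller may reuse its buffer
	select {
	case results <- data:
	default:
		// Queue full: drop (or count) instead of blocking the worker thread.
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		for data := range results {
			fmt.Printf("processed %d bytes\n", len(data)) // slow work happens here
		}
		close(done)
	}()

	onResponse([]byte("example")) // simulate the library invoking the callback
	close(results)                // in a real program the consumer runs for the process lifetime
	<-done
}
```

The same idea applies to any binding language: do the minimum inside the callback and defer everything else to your own worker.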

## Contributing and development

Feel free to dive in; contributions are welcome! Open an issue or submit PRs.

@ -41,19 +41,18 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
|
||||
)
|
||||
benchRuns[benchmarkName] = (runs.avg(), count)
|
||||
|
||||
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
|
||||
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
|
||||
if printRegular:
|
||||
echo ""
|
||||
for k, v in benchRuns:
|
||||
echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k
|
||||
|
||||
|
||||
if printTsv:
|
||||
echo ""
|
||||
echo "name", "\t", "avgTimeSec", "\t", "count"
|
||||
for k, v in benchRuns:
|
||||
echo k, "\t", v.avgTimeSec, "\t", v.count
|
||||
|
||||
|
||||
import std/math
|
||||
|
||||
func floorLog2*(x: int): int =
|
||||
|
||||
44 build.nims
@ -25,6 +25,30 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
|
||||
|
||||
exec(cmd)
|
||||
|
||||
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
|
||||
if not dirExists "build":
|
||||
mkDir "build"
|
||||
|
||||
if `type` == "dynamic":
|
||||
let lib_name = (
|
||||
when defined(windows): name & ".dll"
|
||||
elif defined(macosx): name & ".dylib"
|
||||
else: name & ".so"
|
||||
)
|
||||
exec "nim c" & " --out:build/" & lib_name &
|
||||
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
|
||||
"--nimMainPrefix:libcodex -d:noSignalHandler " &
|
||||
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
|
||||
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
|
||||
else:
|
||||
exec "nim c" & " --out:build/" & name &
|
||||
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
|
||||
"--nimMainPrefix:libcodex -d:noSignalHandler " &
|
||||
"-d:LeopardExtraCompilerFlags=-fPIC " &
|
||||
"-d:chronicles_runtime_filtering " &
|
||||
"-d:chronicles_log_level=TRACE " &
|
||||
params & " " & srcDir & name & ".nim"
|
||||
|
||||
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
|
||||
buildBinary name, srcDir, params
|
||||
exec "build/" & name
|
||||
@ -121,3 +145,23 @@ task showCoverage, "open coverage html":
|
||||
echo " ======== Opening HTML coverage report in browser... ======== "
|
||||
if findExe("open") != "":
|
||||
exec("open coverage/report/index.html")
|
||||
|
||||
task libcodexDynamic, "Generate bindings":
|
||||
var params = ""
|
||||
when compiles(commandLineParams):
|
||||
for param in commandLineParams():
|
||||
if param.len > 0 and param.startsWith("-"):
|
||||
params.add " " & param
|
||||
|
||||
let name = "libcodex"
|
||||
buildLibrary name, "library/", params, "dynamic"
|
||||
|
||||
task libcodexStatic, "Generate bindings":
|
||||
var params = ""
|
||||
when compiles(commandLineParams):
|
||||
for param in commandLineParams():
|
||||
if param.len > 0 and param.startsWith("-"):
|
||||
params.add " " & param
|
||||
|
||||
let name = "libcodex"
|
||||
buildLibrary name, "library/", params, "static"
|
||||
|
||||
12 codex.nim
@ -54,6 +54,16 @@ when isMainModule:
|
||||
,
|
||||
)
|
||||
config.setupLogging()
|
||||
|
||||
try:
|
||||
updateLogLevel(config.logLevel)
|
||||
except ValueError as err:
|
||||
try:
|
||||
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
|
||||
except IOError:
|
||||
echo "Invalid value for --log-level. " & err.msg
|
||||
quit QuitFailure
|
||||
|
||||
config.setupMetrics()
|
||||
|
||||
if not (checkAndCreateDataDir((config.dataDir).string)):
|
||||
@ -94,7 +104,7 @@ when isMainModule:
|
||||
|
||||
## Ctrl+C handling
|
||||
proc doShutdown() =
|
||||
shutdown = server.stop()
|
||||
shutdown = server.shutdown()
|
||||
state = CodexStatus.Stopping
|
||||
|
||||
notice "Stopping Codex"
|
||||
|
||||
@ -8,6 +8,7 @@
|
||||
## those terms.
|
||||
|
||||
import std/sequtils
|
||||
import std/algorithm
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/libp2p/cid
|
||||
@ -38,6 +39,7 @@ const
|
||||
DefaultConcurrentDiscRequests = 10
|
||||
DefaultDiscoveryTimeout = 1.minutes
|
||||
DefaultMinPeersPerBlock = 3
|
||||
DefaultMaxPeersPerBlock = 8
|
||||
DefaultDiscoveryLoopSleep = 3.seconds
|
||||
|
||||
type DiscoveryEngine* = ref object of RootObj
|
||||
@ -51,11 +53,32 @@ type DiscoveryEngine* = ref object of RootObj
|
||||
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
|
||||
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
|
||||
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
|
||||
minPeersPerBlock*: int # Max number of peers with block
|
||||
minPeersPerBlock*: int # Min number of peers with block
|
||||
maxPeersPerBlock*: int # Max number of peers with block
|
||||
discoveryLoopSleep: Duration # Discovery loop sleep
|
||||
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
|
||||
# Inflight discovery requests
|
||||
|
||||
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
|
||||
var haves = b.peers.peersHave(cid)
|
||||
let count = haves.len - b.maxPeersPerBlock
|
||||
if count <= 0:
|
||||
return
|
||||
|
||||
haves.sort(
|
||||
proc(a, b: BlockExcPeerCtx): int =
|
||||
cmp(a.lastExchange, b.lastExchange)
|
||||
)
|
||||
|
||||
let toRemove = haves[0 ..< count]
|
||||
for peer in toRemove:
|
||||
try:
|
||||
peer.cleanPresence(BlockAddress.init(cid))
|
||||
trace "Removed block presence from peer", cid, peer = peer.id
|
||||
except CatchableError as exc:
|
||||
error "Failed to clean presence for peer",
|
||||
cid, peer = peer.id, error = exc.msg, name = exc.name
|
||||
|
||||
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
|
||||
try:
|
||||
while b.discEngineRunning:
|
||||
@ -78,8 +101,16 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
|
||||
trace "Discovery request already in progress", cid
|
||||
continue
|
||||
|
||||
trace "Running discovery task for cid", cid
|
||||
|
||||
let haves = b.peers.peersHave(cid)
|
||||
|
||||
if haves.len > b.maxPeersPerBlock:
|
||||
trace "Cleaning up excess peers",
|
||||
cid, peers = haves.len, max = b.maxPeersPerBlock
|
||||
b.cleanupExcessPeers(cid)
|
||||
continue
|
||||
|
||||
if haves.len < b.minPeersPerBlock:
|
||||
let request = b.discovery.find(cid)
|
||||
b.inFlightDiscReqs[cid] = request
|
||||
@ -156,6 +187,7 @@ proc new*(
|
||||
concurrentDiscReqs = DefaultConcurrentDiscRequests,
|
||||
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
|
||||
minPeersPerBlock = DefaultMinPeersPerBlock,
|
||||
maxPeersPerBlock = DefaultMaxPeersPerBlock,
|
||||
): DiscoveryEngine =
|
||||
## Create a discovery engine instance for advertising services
|
||||
##
|
||||
@ -171,4 +203,5 @@ proc new*(
|
||||
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
|
||||
discoveryLoopSleep: discoveryLoopSleep,
|
||||
minPeersPerBlock: minPeersPerBlock,
|
||||
maxPeersPerBlock: maxPeersPerBlock,
|
||||
)
|
||||
|
||||
@ -12,12 +12,14 @@ import std/sets
|
||||
import std/options
|
||||
import std/algorithm
|
||||
import std/sugar
|
||||
import std/random
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/libp2p/[cid, switch, multihash, multicodec]
|
||||
import pkg/metrics
|
||||
import pkg/stint
|
||||
import pkg/questionable
|
||||
import pkg/stew/shims/sets
|
||||
|
||||
import ../../rng
|
||||
import ../../stores/blockstore
|
||||
@ -63,30 +65,59 @@ declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sen
|
||||
declareCounter(
|
||||
codex_block_exchange_blocks_received, "codex blockexchange blocks received"
|
||||
)
|
||||
declareCounter(
|
||||
codex_block_exchange_spurious_blocks_received,
|
||||
"codex blockexchange unrequested/duplicate blocks received",
|
||||
)
|
||||
declareCounter(
|
||||
codex_block_exchange_discovery_requests_total,
|
||||
"Total number of peer discovery requests sent",
|
||||
)
|
||||
declareCounter(
|
||||
codex_block_exchange_peer_timeouts_total, "Total number of peer activity timeouts"
|
||||
)
|
||||
declareCounter(
|
||||
codex_block_exchange_requests_failed_total,
|
||||
"Total number of block requests that failed after exhausting retries",
|
||||
)
|
||||
|
||||
const
|
||||
DefaultMaxPeersPerRequest* = 10
|
||||
# The default max message length of nim-libp2p is 100 megabytes, meaning we can
|
||||
# in principle fit up to 1600 64k blocks per message, so 20 is well under
|
||||
# that number.
|
||||
DefaultMaxBlocksPerMessage = 20
|
||||
DefaultTaskQueueSize = 100
|
||||
DefaultConcurrentTasks = 10
|
||||
# Don't do more than one discovery request per `DiscoveryRateLimit` seconds.
|
||||
DiscoveryRateLimit = 3.seconds
|
||||
DefaultPeerActivityTimeout = 1.minutes
|
||||
# Match MaxWantListBatchSize to efficiently respond to incoming WantLists
|
||||
PresenceBatchSize = MaxWantListBatchSize
|
||||
CleanupBatchSize = 2048
|
||||
|
||||
type
|
||||
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
|
||||
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
|
||||
PeerSelector* =
|
||||
proc(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx {.gcsafe, raises: [].}
|
||||
|
||||
BlockExcEngine* = ref object of RootObj
|
||||
localStore*: BlockStore # Local block store for this instance
|
||||
network*: BlockExcNetwork # Petwork interface
|
||||
network*: BlockExcNetwork # Network interface
|
||||
peers*: PeerCtxStore # Peers we're currently actively exchanging with
|
||||
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
|
||||
# Peers we're currently processing tasks for
|
||||
selectPeer*: PeerSelector # Peers we're currently processing tasks for
|
||||
concurrentTasks: int # Number of concurrent peers we're serving at any given time
|
||||
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
|
||||
blockexcRunning: bool # Indicates if the blockexc task is running
|
||||
maxBlocksPerMessage: int
|
||||
# Maximum number of blocks we can squeeze in a single message
|
||||
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
|
||||
wallet*: WalletRef # Nitro wallet for micropayments
|
||||
pricing*: ?Pricing # Optional bandwidth pricing
|
||||
discovery*: DiscoveryEngine
|
||||
advertiser*: Advertiser
|
||||
lastDiscRequest: Moment # time of last discovery request
|
||||
|
||||
Pricing* = object
|
||||
address*: EthAddress
|
||||
@ -104,7 +135,6 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
|
||||
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
|
||||
## Start the blockexc task
|
||||
##
|
||||
|
||||
await self.discovery.start()
|
||||
await self.advertiser.start()
|
||||
|
||||
@ -154,8 +184,145 @@ proc sendWantBlock(
|
||||
) # we want this remote to send us a block
|
||||
codex_block_exchange_want_block_lists_sent.inc()
|
||||
|
||||
proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
|
||||
Rng.instance.sample(peers)
|
||||
proc sendBatchedWantList(
|
||||
self: BlockExcEngine,
|
||||
peer: BlockExcPeerCtx,
|
||||
addresses: seq[BlockAddress],
|
||||
full: bool,
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
var offset = 0
|
||||
while offset < addresses.len:
|
||||
let batchEnd = min(offset + MaxWantListBatchSize, addresses.len)
|
||||
let batch = addresses[offset ..< batchEnd]
|
||||
|
||||
trace "Sending want list batch",
|
||||
peer = peer.id,
|
||||
batchSize = batch.len,
|
||||
offset = offset,
|
||||
total = addresses.len,
|
||||
full = full
|
||||
|
||||
await self.network.request.sendWantList(
|
||||
peer.id, batch, full = (full and offset == 0)
|
||||
)
|
||||
for address in batch:
|
||||
peer.lastSentWants.incl(address)
|
||||
|
||||
offset = batchEnd
|
||||
|
||||
proc refreshBlockKnowledge(
|
||||
self: BlockExcEngine, peer: BlockExcPeerCtx, skipDelta = false, resetBackoff = false
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
if peer.lastSentWants.len > 0:
|
||||
var toRemove: seq[BlockAddress]
|
||||
|
||||
for address in peer.lastSentWants:
|
||||
if address notin self.pendingBlocks:
|
||||
toRemove.add(address)
|
||||
|
||||
if toRemove.len >= CleanupBatchSize:
|
||||
await idleAsync()
|
||||
break
|
||||
|
||||
for addr in toRemove:
|
||||
peer.lastSentWants.excl(addr)
|
||||
|
||||
if self.pendingBlocks.wantListLen == 0:
|
||||
if peer.lastSentWants.len > 0:
|
||||
trace "Clearing want list tracking, no pending blocks", peer = peer.id
|
||||
peer.lastSentWants.clear()
|
||||
return
|
||||
|
||||
# We only ask for blocks that the peer hasn't already told us it has.
|
||||
let
|
||||
peerHave = peer.peerHave
|
||||
toAsk = toHashSet(self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave))
|
||||
|
||||
if toAsk.len == 0:
|
||||
if peer.lastSentWants.len > 0:
|
||||
trace "Clearing want list tracking, peer has all blocks", peer = peer.id
|
||||
peer.lastSentWants.clear()
|
||||
return
|
||||
|
||||
let newWants = toAsk - peer.lastSentWants
|
||||
|
||||
if peer.lastSentWants.len > 0 and not skipDelta:
|
||||
if newWants.len > 0:
|
||||
trace "Sending delta want list update",
|
||||
peer = peer.id, newWants = newWants.len, totalWants = toAsk.len
|
||||
|
||||
await self.sendBatchedWantList(peer, newWants.toSeq, full = false)
|
||||
|
||||
if resetBackoff:
|
||||
peer.wantsUpdated
|
||||
else:
|
||||
trace "No changes in want list, skipping send", peer = peer.id
|
||||
peer.lastSentWants = toAsk
|
||||
else:
|
||||
trace "Sending full want list", peer = peer.id, length = toAsk.len
|
||||
|
||||
await self.sendBatchedWantList(peer, toAsk.toSeq, full = true)
|
||||
|
||||
if resetBackoff:
|
||||
peer.wantsUpdated
|
||||
|
||||
proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledError]).} =
|
||||
let runtimeQuota = 10.milliseconds
|
||||
var lastIdle = Moment.now()
|
||||
|
||||
for peer in self.peers.peers.values.toSeq:
|
||||
# We refresh block knowledge if:
|
||||
# 1. the peer hasn't been refreshed in a while;
|
||||
# 2. the list of blocks we care about has changed.
|
||||
#
|
||||
# Note that because of (2), it is important that we update our
|
||||
# want list in the coarsest way possible instead of over many
|
||||
# small updates.
|
||||
#
|
||||
|
||||
# In dynamic swarms, staleness will dominate latency.
|
||||
let
|
||||
hasNewBlocks = peer.lastRefresh < self.pendingBlocks.lastInclusion
|
||||
isKnowledgeStale = peer.isKnowledgeStale
|
||||
|
||||
if isKnowledgeStale or hasNewBlocks:
|
||||
if not peer.refreshInProgress:
|
||||
peer.refreshRequested()
|
||||
await self.refreshBlockKnowledge(
|
||||
peer, skipDelta = isKnowledgeStale, resetBackoff = hasNewBlocks
|
||||
)
|
||||
else:
|
||||
trace "Not refreshing: peer is up to date", peer = peer.id
|
||||
|
||||
if (Moment.now() - lastIdle) >= runtimeQuota:
|
||||
try:
|
||||
await idleAsync()
|
||||
except CancelledError:
|
||||
discard
|
||||
lastIdle = Moment.now()
|
||||
|
||||
proc searchForNewPeers(self: BlockExcEngine, cid: Cid) =
|
||||
if self.lastDiscRequest + DiscoveryRateLimit < Moment.now():
|
||||
trace "Searching for new peers for", cid = cid
|
||||
codex_block_exchange_discovery_requests_total.inc()
|
||||
self.lastDiscRequest = Moment.now() # always refresh before calling await!
|
||||
self.discovery.queueFindBlocksReq(@[cid])
|
||||
else:
|
||||
trace "Not searching for new peers, rate limit not expired", cid = cid
|
||||
|
||||
proc evictPeer(self: BlockExcEngine, peer: PeerId) =
|
||||
## Cleanup disconnected peer
|
||||
##
|
||||
|
||||
trace "Evicting disconnected/departed peer", peer
|
||||
|
||||
let peerCtx = self.peers.get(peer)
|
||||
if not peerCtx.isNil:
|
||||
for address in peerCtx.blocksRequested:
|
||||
self.pendingBlocks.clearRequest(address, peer.some)
|
||||
|
||||
# drop the peer from the peers table
|
||||
self.peers.remove(peer)
|
||||
|
||||
proc downloadInternal(
|
||||
self: BlockExcEngine, address: BlockAddress
|
||||
@ -173,41 +340,147 @@ proc downloadInternal(
|
||||
|
||||
if self.pendingBlocks.retriesExhausted(address):
|
||||
trace "Error retries exhausted"
|
||||
codex_block_exchange_requests_failed_total.inc()
|
||||
handle.fail(newException(RetriesExhaustedError, "Error retries exhausted"))
|
||||
break
|
||||
|
||||
trace "Running retry handle"
|
||||
let peers = self.peers.getPeersForBlock(address)
|
||||
logScope:
|
||||
peersWith = peers.with.len
|
||||
peersWithout = peers.without.len
|
||||
|
||||
trace "Peers for block"
|
||||
if peers.with.len > 0:
|
||||
self.pendingBlocks.setInFlight(address, true)
|
||||
await self.sendWantBlock(@[address], peers.with.randomPeer)
|
||||
else:
|
||||
self.pendingBlocks.setInFlight(address, false)
|
||||
if peers.with.len == 0:
|
||||
# We know of no peers that have the block.
|
||||
if peers.without.len > 0:
|
||||
await self.sendWantHave(@[address], peers.without)
|
||||
self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
|
||||
# If we have peers connected but none of them have the block, this
|
||||
# could be because our knowledge about what they have has run stale.
|
||||
# Tries to refresh it.
|
||||
await self.refreshBlockKnowledge()
|
||||
# Also tries to look for new peers for good measure.
|
||||
# TODO: in the future, peer search and knowledge maintenance should
|
||||
# be completely decoupled from one another. It is very hard to
|
||||
# control what happens and how many neighbors we get like this.
|
||||
self.searchForNewPeers(address.cidOrTreeCid)
|
||||
|
||||
await (handle or sleepAsync(self.pendingBlocks.retryInterval))
|
||||
let nextDiscovery =
|
||||
if self.lastDiscRequest + DiscoveryRateLimit > Moment.now():
|
||||
(self.lastDiscRequest + DiscoveryRateLimit - Moment.now())
|
||||
else:
|
||||
0.milliseconds
|
||||
|
||||
let retryDelay =
|
||||
max(secs(rand(self.pendingBlocks.retryInterval.secs)), nextDiscovery)
|
||||
|
||||
# We now wait for a bit and then retry. If the handle gets completed in the
# meantime (because the presence handler might have requested the block and
# received it already), we are done. Retry delays are randomized
# so we don't get all block loops spinning at the same time.
|
||||
await handle or sleepAsync(retryDelay)
|
||||
if handle.finished:
|
||||
break
|
||||
|
||||
# Without decrementing the retries count, this would infinitely loop
|
||||
# trying to find peers.
|
||||
self.pendingBlocks.decRetries(address)
|
||||
|
||||
# If we still don't have the block, we'll go for another cycle.
|
||||
trace "No peers for block, will retry shortly"
|
||||
continue
|
||||
|
||||
# Once again, it might happen that the block was requested to a peer
|
||||
# in the meantime. If so, we don't need to do anything. Otherwise,
|
||||
# we'll be the ones placing the request.
|
||||
let scheduledPeer =
|
||||
if not self.pendingBlocks.isRequested(address):
|
||||
let peer = self.selectPeer(peers.with)
|
||||
discard self.pendingBlocks.markRequested(address, peer.id)
|
||||
peer.blockRequestScheduled(address)
|
||||
trace "Request block from block retry loop"
|
||||
await self.sendWantBlock(@[address], peer)
|
||||
peer
|
||||
else:
|
||||
let peerId = self.pendingBlocks.getRequestPeer(address).get()
|
||||
self.peers.get(peerId)
|
||||
|
||||
if scheduledPeer.isNil:
|
||||
trace "Scheduled peer no longer available, clearing stale request", address
|
||||
self.pendingBlocks.clearRequest(address)
|
||||
continue
|
||||
|
||||
# Parks until either the block is received, or the peer times out.
|
||||
let activityTimer = scheduledPeer.activityTimer()
|
||||
await handle or activityTimer # TODO: or peerDropped
|
||||
activityTimer.cancel()
|
||||
|
||||
# XXX: we should probably not have this. Blocks should be retried
|
||||
# to infinity unless cancelled by the client.
|
||||
self.pendingBlocks.decRetries(address)
|
||||
|
||||
if handle.finished:
|
||||
trace "Handle for block finished", failed = handle.failed
|
||||
break
|
||||
else:
|
||||
# If the peer timed out, retries immediately.
|
||||
trace "Peer timed out during block request", peer = scheduledPeer.id
|
||||
codex_block_exchange_peer_timeouts_total.inc()
|
||||
await self.network.dropPeer(scheduledPeer.id)
|
||||
# Evicts peer immediately or we may end up picking it again in the
|
||||
# next retry.
|
||||
self.evictPeer(scheduledPeer.id)
|
||||
except CancelledError as exc:
|
||||
trace "Block download cancelled"
|
||||
if not handle.finished:
|
||||
await handle.cancelAndWait()
|
||||
except RetriesExhaustedError as exc:
|
||||
warn "Retries exhausted for block", address, exc = exc.msg
|
||||
codex_block_exchange_requests_failed_total.inc()
|
||||
if not handle.finished:
|
||||
handle.fail(exc)
|
||||
finally:
|
||||
self.pendingBlocks.setInFlight(address, false)
|
||||
self.pendingBlocks.clearRequest(address)
|
||||
|
||||
proc requestBlocks*(
|
||||
self: BlockExcEngine, addresses: seq[BlockAddress]
|
||||
): SafeAsyncIter[Block] =
|
||||
var handles: seq[BlockHandle]
|
||||
|
||||
# Adds all blocks to pendingBlocks before calling the first downloadInternal. This will
|
||||
# ensure that we don't send incomplete want lists.
|
||||
for address in addresses:
|
||||
if address notin self.pendingBlocks:
|
||||
handles.add(self.pendingBlocks.getWantHandle(address))
|
||||
|
||||
for address in addresses:
|
||||
self.trackedFutures.track(self.downloadInternal(address))
|
||||
|
||||
let totalHandles = handles.len
|
||||
var completed = 0
|
||||
|
||||
proc isFinished(): bool =
|
||||
completed == totalHandles
|
||||
|
||||
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
|
||||
# Be it success or failure, we're completing this future.
|
||||
let value =
|
||||
try:
|
||||
# FIXME: this is super expensive. We're doing several linear scans,
|
||||
# not to mention all the copying and callback fumbling in `one`.
|
||||
let
|
||||
handle = await one(handles)
|
||||
i = handles.find(handle)
|
||||
handles.del(i)
|
||||
success await handle
|
||||
except CancelledError as err:
|
||||
warn "Block request cancelled", addresses, err = err.msg
|
||||
raise err
|
||||
except CatchableError as err:
|
||||
error "Error getting blocks from exchange engine", addresses, err = err.msg
|
||||
failure err
|
||||
|
||||
inc(completed)
|
||||
return value
|
||||
|
||||
return SafeAsyncIter[Block].new(genNext, isFinished)
|
||||
|
||||
proc requestBlock*(
|
||||
self: BlockExcEngine, address: BlockAddress
|
||||
@ -239,60 +512,64 @@ proc completeBlock*(self: BlockExcEngine, address: BlockAddress, blk: Block) =
|
||||
proc blockPresenceHandler*(
|
||||
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
|
||||
) {.async: (raises: []).} =
|
||||
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
|
||||
trace "Received block presence from peer", peer, len = blocks.len
|
||||
let
|
||||
peerCtx = self.peers.get(peer)
|
||||
ourWantList = toSeq(self.pendingBlocks.wantList)
|
||||
ourWantList = toHashSet(self.pendingBlocks.wantList.toSeq)
|
||||
|
||||
if peerCtx.isNil:
|
||||
return
|
||||
|
||||
peerCtx.refreshReplied()
|
||||
|
||||
for blk in blocks:
|
||||
if presence =? Presence.init(blk):
|
||||
peerCtx.setPresence(presence)
|
||||
|
||||
let
|
||||
peerHave = peerCtx.peerHave
|
||||
dontWantCids = peerHave.filterIt(it notin ourWantList)
|
||||
dontWantCids = peerHave - ourWantList
|
||||
|
||||
if dontWantCids.len > 0:
|
||||
peerCtx.cleanPresence(dontWantCids)
|
||||
peerCtx.cleanPresence(dontWantCids.toSeq)
|
||||
|
||||
let ourWantCids = ourWantList.filterIt(
|
||||
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
|
||||
not self.pendingBlocks.isInFlight(it)
|
||||
)
|
||||
self.pendingBlocks.markRequested(it, peer)
|
||||
).toSeq
|
||||
|
||||
for address in ourWantCids:
|
||||
self.pendingBlocks.setInFlight(address, true)
|
||||
self.pendingBlocks.decRetries(address)
|
||||
peerCtx.blockRequestScheduled(address)
|
||||
|
||||
if ourWantCids.len > 0:
|
||||
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
|
||||
# FIXME: this will result in duplicate requests for blocks
|
||||
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
|
||||
warn "Failed to send wantBlock to peer", peer, err = err.msg
|
||||
for address in ourWantCids:
|
||||
self.pendingBlocks.clearRequest(address, peer.some)
|
||||
|
||||
proc scheduleTasks(
|
||||
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
let cids = blocksDelivery.mapIt(it.blk.cid)
|
||||
|
||||
# schedule any new peers to provide blocks to
|
||||
for p in self.peers:
|
||||
for c in cids: # for each cid
|
||||
for blockDelivery in blocksDelivery: # for each cid
|
||||
# schedule a peer if it wants at least one cid
|
||||
# and we have it in our local store
|
||||
if c in p.peerWantsCids:
|
||||
if blockDelivery.address in p.wantedBlocks:
|
||||
let cid = blockDelivery.blk.cid
|
||||
try:
|
||||
if await (c in self.localStore):
|
||||
if await (cid in self.localStore):
|
||||
# TODO: the try/except should go away once blockstore tracks exceptions
|
||||
self.scheduleTask(p)
|
||||
break
|
||||
except CancelledError as exc:
|
||||
warn "Checking local store canceled", cid = c, err = exc.msg
|
||||
warn "Checking local store canceled", cid = cid, err = exc.msg
|
||||
return
|
||||
except CatchableError as exc:
|
||||
error "Error checking local store for cid", cid = c, err = exc.msg
|
||||
error "Error checking local store for cid", cid = cid, err = exc.msg
|
||||
raiseAssert "Unexpected error checking local store for cid"
|
||||
|
||||
proc cancelBlocks(
|
||||
@ -301,28 +578,45 @@ proc cancelBlocks(
|
||||
## Tells neighboring peers that we're no longer interested in a block.
|
||||
##
|
||||
|
||||
let blocksDelivered = toHashSet(addrs)
|
||||
var scheduledCancellations: Table[PeerId, HashSet[BlockAddress]]
|
||||
|
||||
if self.peers.len == 0:
|
||||
return
|
||||
|
||||
trace "Sending block request cancellations to peers",
|
||||
addrs, peers = self.peers.peerIds
|
||||
|
||||
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
|
||||
proc dispatchCancellations(
|
||||
entry: tuple[peerId: PeerId, addresses: HashSet[BlockAddress]]
|
||||
): Future[PeerId] {.async: (raises: [CancelledError]).} =
|
||||
trace "Sending block request cancellations to peer",
|
||||
peer = entry.peerId, addresses = entry.addresses.len
|
||||
await self.network.request.sendWantCancellations(
|
||||
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
|
||||
peer = entry.peerId, addresses = entry.addresses.toSeq
|
||||
)
|
||||
|
||||
return peerCtx
|
||||
return entry.peerId
|
||||
|
||||
try:
|
||||
let (succeededFuts, failedFuts) = await allFinishedFailed[BlockExcPeerCtx](
|
||||
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
|
||||
processPeer
|
||||
)
|
||||
for peerCtx in self.peers.peers.values:
|
||||
# Do we have pending requests, towards this peer, for any of the blocks
|
||||
# that were just delivered?
|
||||
let intersection = peerCtx.blocksRequested.intersection(blocksDelivered)
|
||||
if intersection.len > 0:
|
||||
# If so, schedules a cancellation.
|
||||
scheduledCancellations[peerCtx.id] = intersection
|
||||
|
||||
if scheduledCancellations.len == 0:
|
||||
return
|
||||
|
||||
let (succeededFuts, failedFuts) = await allFinishedFailed[PeerId](
|
||||
toSeq(scheduledCancellations.pairs).map(dispatchCancellations)
|
||||
)
|
||||
|
||||
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
|
||||
peerCtx.cleanPresence(addrs)
|
||||
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerId: PeerId):
|
||||
let ctx = self.peers.get(peerId)
|
||||
if not ctx.isNil:
|
||||
ctx.cleanPresence(addrs)
|
||||
for address in scheduledCancellations[peerId]:
|
||||
ctx.blockRequestCancelled(address)
|
||||
|
||||
if failedFuts.len > 0:
|
||||
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
|
||||
@ -392,17 +686,31 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
|
||||
return success()
|
||||
|
||||
proc blocksDeliveryHandler*(
|
||||
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
|
||||
self: BlockExcEngine,
|
||||
peer: PeerId,
|
||||
blocksDelivery: seq[BlockDelivery],
|
||||
allowSpurious: bool = false,
|
||||
) {.async: (raises: []).} =
|
||||
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
|
||||
|
||||
var validatedBlocksDelivery: seq[BlockDelivery]
|
||||
let peerCtx = self.peers.get(peer)
|
||||
|
||||
let runtimeQuota = 10.milliseconds
|
||||
var lastIdle = Moment.now()
|
||||
|
||||
for bd in blocksDelivery:
|
||||
logScope:
|
||||
peer = peer
|
||||
address = bd.address
|
||||
|
||||
try:
|
||||
# Unknown peers and unrequested blocks are dropped with a warning.
|
||||
if not allowSpurious and (peerCtx == nil or not peerCtx.blockReceived(bd.address)):
|
||||
warn "Dropping unrequested or duplicate block received from peer"
|
||||
codex_block_exchange_spurious_blocks_received.inc()
|
||||
continue
|
||||
|
||||
if err =? self.validateBlockDelivery(bd).errorOption:
|
||||
warn "Block validation failed", msg = err.msg
|
||||
continue
|
||||
@ -422,15 +730,25 @@ proc blocksDeliveryHandler*(
|
||||
).errorOption:
|
||||
warn "Unable to store proof and cid for a block"
|
||||
continue
|
||||
except CancelledError:
|
||||
trace "Block delivery handling cancelled"
|
||||
except CatchableError as exc:
|
||||
warn "Error handling block delivery", error = exc.msg
|
||||
continue
|
||||
|
||||
validatedBlocksDelivery.add(bd)
|
||||
|
||||
if (Moment.now() - lastIdle) >= runtimeQuota:
|
||||
try:
|
||||
await idleAsync()
|
||||
except CancelledError:
|
||||
discard
|
||||
except CatchableError:
|
||||
discard
|
||||
lastIdle = Moment.now()
|
||||
|
||||
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
|
||||
|
||||
let peerCtx = self.peers.get(peer)
|
||||
if peerCtx != nil:
|
||||
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
|
||||
warn "Error paying for blocks", err = err.msg
|
||||
@ -454,16 +772,17 @@ proc wantListHandler*(
|
||||
presence: seq[BlockPresence]
|
||||
schedulePeer = false
|
||||
|
||||
let runtimeQuota = 10.milliseconds
|
||||
var lastIdle = Moment.now()
|
||||
|
||||
try:
|
||||
for e in wantList.entries:
|
||||
let idx = peerCtx.peerWants.findIt(it.address == e.address)
|
||||
|
||||
logScope:
|
||||
peer = peerCtx.id
|
||||
address = e.address
|
||||
wantType = $e.wantType
|
||||
|
||||
if idx < 0: # Adding new entry to peer wants
|
||||
if e.address notin peerCtx.wantedBlocks: # Adding new entry to peer wants
|
||||
let
|
||||
have =
|
||||
try:
|
||||
@ -474,6 +793,8 @@ proc wantListHandler*(
|
||||
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
|
||||
|
||||
if e.cancel:
|
||||
# This is sort of expected if we sent the block to the peer, as we have removed
|
||||
# it from the peer's wantlist ourselves.
|
||||
trace "Received cancelation for untracked block, skipping",
|
||||
address = e.address
|
||||
continue
|
||||
@ -482,12 +803,14 @@ proc wantListHandler*(
|
||||
case e.wantType
|
||||
of WantType.WantHave:
|
||||
if have:
|
||||
trace "We HAVE the block", address = e.address
|
||||
presence.add(
|
||||
BlockPresence(
|
||||
address: e.address, `type`: BlockPresenceType.Have, price: price
|
||||
)
|
||||
)
|
||||
else:
|
||||
trace "We DON'T HAVE the block", address = e.address
|
||||
if e.sendDontHave:
|
||||
presence.add(
|
||||
BlockPresence(
|
||||
@ -497,28 +820,35 @@ proc wantListHandler*(
|
||||
|
||||
codex_block_exchange_want_have_lists_received.inc()
|
||||
of WantType.WantBlock:
|
||||
peerCtx.peerWants.add(e)
|
||||
peerCtx.wantedBlocks.incl(e.address)
|
||||
schedulePeer = true
|
||||
codex_block_exchange_want_block_lists_received.inc()
|
||||
else: # Updating existing entry in peer wants
|
||||
# peer doesn't want this block anymore
|
||||
if e.cancel:
|
||||
trace "Canceling want for block", address = e.address
|
||||
peerCtx.peerWants.del(idx)
|
||||
peerCtx.wantedBlocks.excl(e.address)
|
||||
trace "Canceled block request",
|
||||
address = e.address, len = peerCtx.peerWants.len
|
||||
address = e.address, len = peerCtx.wantedBlocks.len
|
||||
else:
|
||||
trace "Peer has requested a block more than once", address = e.address
|
||||
if e.wantType == WantType.WantBlock:
|
||||
schedulePeer = true
|
||||
# peer might want to ask for the same cid with
|
||||
# different want params
|
||||
trace "Updating want for block", address = e.address
|
||||
peerCtx.peerWants[idx] = e # update entry
|
||||
trace "Updated block request",
|
||||
address = e.address, len = peerCtx.peerWants.len
|
||||
|
||||
if presence.len >= PresenceBatchSize or (Moment.now() - lastIdle) >= runtimeQuota:
|
||||
if presence.len > 0:
|
||||
trace "Sending presence batch to remote", items = presence.len
|
||||
await self.network.request.sendPresence(peer, presence)
|
||||
presence = @[]
|
||||
try:
|
||||
await idleAsync()
|
||||
except CancelledError:
|
||||
discard
|
||||
lastIdle = Moment.now()
|
||||
|
||||
# Send any remaining presence messages
|
||||
if presence.len > 0:
|
||||
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
|
||||
trace "Sending final presence to remote", items = presence.len
|
||||
await self.network.request.sendPresence(peer, presence)
|
||||
|
||||
if schedulePeer:
|
||||
@ -550,7 +880,7 @@ proc paymentHandler*(
|
||||
else:
|
||||
context.paymentChannel = self.wallet.acceptChannel(payment).option
|
||||
|
||||
proc setupPeer*(
|
||||
proc peerAddedHandler*(
|
||||
self: BlockExcEngine, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
## Perform initial setup, such as want
|
||||
@ -560,88 +890,85 @@ proc setupPeer*(
|
||||
trace "Setting up peer", peer
|
||||
|
||||
if peer notin self.peers:
|
||||
let peerCtx = BlockExcPeerCtx(id: peer, activityTimeout: DefaultPeerActivityTimeout)
|
||||
trace "Setting up new peer", peer
|
||||
self.peers.add(BlockExcPeerCtx(id: peer))
|
||||
self.peers.add(peerCtx)
|
||||
trace "Added peer", peers = self.peers.len
|
||||
|
||||
# broadcast our want list, the other peer will do the same
|
||||
if self.pendingBlocks.wantListLen > 0:
|
||||
trace "Sending our want list to a peer", peer
|
||||
let cids = toSeq(self.pendingBlocks.wantList)
|
||||
await self.network.request.sendWantList(peer, cids, full = true)
|
||||
await self.refreshBlockKnowledge(peerCtx)
|
||||
|
||||
if address =? self.pricing .? address:
|
||||
trace "Sending account to peer", peer
|
||||
await self.network.request.sendAccount(peer, Account(address: address))
|
||||
|
||||
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
|
||||
## Cleanup disconnected peer
|
||||
##
|
||||
proc localLookup(
|
||||
self: BlockExcEngine, address: BlockAddress
|
||||
): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} =
|
||||
if address.leaf:
|
||||
(await self.localStore.getBlockAndProof(address.treeCid, address.index)).map(
|
||||
(blkAndProof: (Block, CodexProof)) =>
|
||||
BlockDelivery(address: address, blk: blkAndProof[0], proof: blkAndProof[1].some)
|
||||
)
|
||||
else:
|
||||
(await self.localStore.getBlock(address)).map(
|
||||
(blk: Block) => BlockDelivery(address: address, blk: blk, proof: CodexProof.none)
|
||||
)
|
||||
|
||||
trace "Dropping peer", peer
|
||||
iterator splitBatches[T](sequence: seq[T], batchSize: int): seq[T] =
|
||||
var batch: seq[T]
|
||||
for element in sequence:
|
||||
if batch.len == batchSize:
|
||||
yield batch
|
||||
batch = @[]
|
||||
batch.add(element)
|
||||
|
||||
# drop the peer from the peers table
|
||||
self.peers.remove(peer)
|
||||
if batch.len > 0:
|
||||
yield batch
|
||||
|
||||
proc taskHandler*(
|
||||
self: BlockExcEngine, task: BlockExcPeerCtx
|
||||
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
|
||||
self: BlockExcEngine, peerCtx: BlockExcPeerCtx
|
||||
) {.async: (raises: [CancelledError, RetriesExhaustedError]).} =
|
||||
# Send the peer the blocks it wants to get,
# if they are present in our local store
|
||||
|
||||
# TODO: There should be all sorts of accounting of
|
||||
# bytes sent/received here
|
||||
# Blocks that have been sent have already been picked up by other tasks and
|
||||
# should not be re-sent.
|
||||
var
|
||||
wantedBlocks = peerCtx.wantedBlocks.filterIt(not peerCtx.isBlockSent(it))
|
||||
sent: HashSet[BlockAddress]
|
||||
|
||||
var wantsBlocks =
|
||||
task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
|
||||
trace "Running task for peer", peer = peerCtx.id
|
||||
|
||||
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
|
||||
for peerWant in task.peerWants.mitems:
|
||||
if peerWant.address in addresses:
|
||||
peerWant.inFlight = inFlight
|
||||
for wantedBlock in wantedBlocks:
|
||||
peerCtx.markBlockAsSent(wantedBlock)
|
||||
|
||||
if wantsBlocks.len > 0:
|
||||
# Mark wants as in-flight.
|
||||
let wantAddresses = wantsBlocks.mapIt(it.address)
|
||||
updateInFlight(wantAddresses, true)
|
||||
wantsBlocks.sort(SortOrder.Descending)
|
||||
try:
|
||||
for batch in wantedBlocks.toSeq.splitBatches(self.maxBlocksPerMessage):
|
||||
var blockDeliveries: seq[BlockDelivery]
|
||||
for wantedBlock in batch:
|
||||
# I/O is blocking so looking up blocks sequentially is fine.
|
||||
without blockDelivery =? await self.localLookup(wantedBlock), err:
|
||||
error "Error getting block from local store",
|
||||
err = err.msg, address = wantedBlock
|
||||
peerCtx.markBlockAsNotSent(wantedBlock)
|
||||
continue
|
||||
blockDeliveries.add(blockDelivery)
|
||||
sent.incl(wantedBlock)
|
||||
|
||||
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
|
||||
if e.address.leaf:
|
||||
(await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
|
||||
(blkAndProof: (Block, CodexProof)) =>
|
||||
BlockDelivery(
|
||||
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
|
||||
)
|
||||
)
|
||||
else:
|
||||
(await self.localStore.getBlock(e.address)).map(
|
||||
(blk: Block) =>
|
||||
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
|
||||
)
|
||||
if blockDeliveries.len == 0:
|
||||
continue
|
||||
|
||||
let
|
||||
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
|
||||
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
|
||||
if bd =? it.value:
|
||||
bd
|
||||
else:
|
||||
raiseAssert "Unexpected error in local lookup"
|
||||
|
||||
# All the wants that failed local lookup must be set to not-in-flight again.
|
||||
let
|
||||
successAddresses = blocksDelivery.mapIt(it.address)
|
||||
failedAddresses = wantAddresses.filterIt(it notin successAddresses)
|
||||
updateInFlight(failedAddresses, false)
|
||||
|
||||
if blocksDelivery.len > 0:
|
||||
trace "Sending blocks to peer",
|
||||
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
|
||||
await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
|
||||
|
||||
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
|
||||
|
||||
task.peerWants.keepItIf(it.address notin successAddresses)
|
||||
await self.network.request.sendBlocksDelivery(peerCtx.id, blockDeliveries)
|
||||
codex_block_exchange_blocks_sent.inc(blockDeliveries.len.int64)
|
||||
# Drop the batch from the peer's set of wanted blocks; i.e. assume that once
# we have sent the blocks, the peer no longer wants them, so we don't need to
# re-send them. Note that the send might still fail down the line, and we will
# have removed those entries anyway; at that point we rely on the requester
# retrying for the request to succeed.
peerCtx.wantedBlocks.keepItIf(it notin sent)
|
||||
finally:
|
||||
# Better safe than sorry: if an exception does happen, we don't want to keep
|
||||
# those as sent, as it'll effectively prevent the blocks from ever being sent again.
|
||||
peerCtx.blocksSent.keepItIf(it notin wantedBlocks)
|
||||
|
||||
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
|
||||
## process tasks
|
||||
@ -652,11 +979,47 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
|
||||
while self.blockexcRunning:
|
||||
let peerCtx = await self.taskQueue.pop()
|
||||
await self.taskHandler(peerCtx)
|
||||
except CancelledError:
|
||||
trace "block exchange task runner cancelled"
|
||||
except CatchableError as exc:
|
||||
error "error running block exchange task", error = exc.msg
|
||||
|
||||
info "Exiting blockexc task runner"
|
||||
|
||||
proc selectRandom*(
    peers: seq[BlockExcPeerCtx]
): BlockExcPeerCtx {.gcsafe, raises: [].} =
  if peers.len == 1:
    return peers[0]

  proc evalPeerScore(peer: BlockExcPeerCtx): float =
    let
      loadPenalty = peer.blocksRequested.len.float * 2.0
      successRate =
        if peer.exchanged > 0:
          peer.exchanged.float / (peer.exchanged + peer.blocksRequested.len).float
        else:
          0.5
      failurePenalty = (1.0 - successRate) * 5.0
    return loadPenalty + failurePenalty

  let
    scores = peers.mapIt(evalPeerScore(it))
    maxScore = scores.max() + 1.0
    weights = scores.mapIt(maxScore - it)

  var totalWeight = 0.0
  for w in weights:
    totalWeight += w

  var r = rand(totalWeight)
  for i, weight in weights:
    r -= weight
    if r <= 0.0:
      return peers[i]

  return peers[^1]

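To illustrate the inverse-score weighting used by selectRandom, here is a self-contained sketch with made-up scores; lower score means a lighter-loaded peer, hence a larger weight and a higher chance of being picked:

import std/[random, sequtils]

# Toy weighted draw mirroring the logic above; the scores are hypothetical.
proc pickWeighted(scores: seq[float]): int =
  let
    maxScore = scores.max() + 1.0
    weights = scores.mapIt(maxScore - it)
  var total = 0.0
  for w in weights:
    total += w
  var r = rand(total)
  for i, w in weights:
    r -= w
    if r <= 0.0:
      return i
  return scores.high

when isMainModule:
  randomize()
  # index 0 is a lightly loaded peer (score 1.0), index 2 a heavily loaded one (score 9.0)
  echo pickWeighted(@[1.0, 4.0, 9.0]) # prints 0 most of the time
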
proc new*(
|
||||
T: type BlockExcEngine,
|
||||
localStore: BlockStore,
|
||||
@ -666,7 +1029,9 @@ proc new*(
|
||||
advertiser: Advertiser,
|
||||
peerStore: PeerCtxStore,
|
||||
pendingBlocks: PendingBlocksManager,
|
||||
maxBlocksPerMessage = DefaultMaxBlocksPerMessage,
|
||||
concurrentTasks = DefaultConcurrentTasks,
|
||||
selectPeer: PeerSelector = selectRandom,
|
||||
): BlockExcEngine =
|
||||
## Create new block exchange engine instance
|
||||
##
|
||||
@ -679,23 +1044,13 @@ proc new*(
|
||||
wallet: wallet,
|
||||
concurrentTasks: concurrentTasks,
|
||||
trackedFutures: TrackedFutures(),
|
||||
maxBlocksPerMessage: maxBlocksPerMessage,
|
||||
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
|
||||
discovery: discovery,
|
||||
advertiser: advertiser,
|
||||
selectPeer: selectPeer,
|
||||
)
|
||||
|
||||
proc peerEventHandler(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
await self.setupPeer(peerId)
|
||||
else:
|
||||
self.dropPeer(peerId)
|
||||
|
||||
if not isNil(network.switch):
|
||||
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
|
||||
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
|
||||
|
||||
proc blockWantListHandler(
|
||||
peer: PeerId, wantList: WantList
|
||||
): Future[void] {.async: (raises: []).} =
|
||||
@ -721,12 +1076,24 @@ proc new*(
|
||||
): Future[void] {.async: (raises: []).} =
|
||||
self.paymentHandler(peer, payment)
|
||||
|
||||
proc peerAddedHandler(
|
||||
peer: PeerId
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
await self.peerAddedHandler(peer)
|
||||
|
||||
proc peerDepartedHandler(
|
||||
peer: PeerId
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
self.evictPeer(peer)
|
||||
|
||||
network.handlers = BlockExcHandlers(
|
||||
onWantList: blockWantListHandler,
|
||||
onBlocksDelivery: blocksDeliveryHandler,
|
||||
onPresence: blockPresenceHandler,
|
||||
onAccount: accountHandler,
|
||||
onPayment: paymentHandler,
|
||||
onPeerJoined: peerAddedHandler,
|
||||
onPeerDeparted: peerDepartedHandler,
|
||||
)
|
||||
|
||||
return self
|
||||
|
||||
@ -34,7 +34,7 @@ declareGauge(
|
||||
|
||||
const
|
||||
DefaultBlockRetries* = 3000
|
||||
DefaultRetryInterval* = 500.millis
|
||||
DefaultRetryInterval* = 2.seconds
|
||||
|
||||
type
|
||||
RetriesExhaustedError* = object of CatchableError
|
||||
@ -42,7 +42,7 @@ type
|
||||
|
||||
BlockReq* = object
|
||||
handle*: BlockHandle
|
||||
inFlight*: bool
|
||||
requested*: ?PeerId
|
||||
blockRetries*: int
|
||||
startTime*: int64
|
||||
|
||||
@ -50,12 +50,13 @@ type
|
||||
blockRetries*: int = DefaultBlockRetries
|
||||
retryInterval*: Duration = DefaultRetryInterval
|
||||
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
|
||||
lastInclusion*: Moment # time at which we last included a block into our wantlist
|
||||
|
||||
proc updatePendingBlockGauge(p: PendingBlocksManager) =
|
||||
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
|
||||
|
||||
proc getWantHandle*(
|
||||
self: PendingBlocksManager, address: BlockAddress, inFlight = false
|
||||
self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
|
||||
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
|
||||
## Add an event for a block
|
||||
##
|
||||
@ -65,11 +66,13 @@ proc getWantHandle*(
|
||||
do:
|
||||
let blk = BlockReq(
|
||||
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
|
||||
inFlight: inFlight,
|
||||
requested: requested,
|
||||
blockRetries: self.blockRetries,
|
||||
startTime: getMonoTime().ticks,
|
||||
)
|
||||
self.blocks[address] = blk
|
||||
self.lastInclusion = Moment.now()
|
||||
|
||||
let handle = blk.handle
|
||||
|
||||
proc cleanUpBlock(data: pointer) {.raises: [].} =
|
||||
@ -86,9 +89,9 @@ proc getWantHandle*(
|
||||
return handle
|
||||
|
||||
proc getWantHandle*(
|
||||
self: PendingBlocksManager, cid: Cid, inFlight = false
|
||||
self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
|
||||
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
|
||||
self.getWantHandle(BlockAddress.init(cid), inFlight)
|
||||
self.getWantHandle(BlockAddress.init(cid), requested)
|
||||
|
||||
proc completeWantHandle*(
|
||||
self: PendingBlocksManager, address: BlockAddress, blk: Block
|
||||
@ -121,9 +124,6 @@ proc resolve*(
|
||||
blockReq.handle.complete(bd.blk)
|
||||
|
||||
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
|
||||
|
||||
if retrievalDurationUs > 500000:
|
||||
warn "High block retrieval time", retrievalDurationUs, address = bd.address
|
||||
else:
|
||||
trace "Block handle already finished", address = bd.address
|
||||
|
||||
@ -141,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
|
||||
self.blocks.withValue(address, pending):
|
||||
result = pending[].blockRetries <= 0
|
||||
|
||||
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
|
||||
## Set inflight status for a block
|
||||
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
|
||||
## Check if a block has been requested to a peer
|
||||
##
|
||||
result = false
|
||||
self.blocks.withValue(address, pending):
|
||||
result = pending[].requested.isSome
|
||||
|
||||
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
|
||||
## Returns the peer that requested this block
|
||||
##
|
||||
result = PeerId.none
|
||||
self.blocks.withValue(address, pending):
|
||||
result = pending[].requested
|
||||
|
||||
proc markRequested*(
|
||||
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
|
||||
): bool =
|
||||
## Marks this block as having been requested to a peer
|
||||
##
|
||||
|
||||
self.blocks.withValue(address, pending):
|
||||
pending[].inFlight = inFlight
|
||||
|
||||
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
|
||||
## Check if a block is in flight
|
||||
##
|
||||
if self.isRequested(address):
|
||||
return false
|
||||
|
||||
self.blocks.withValue(address, pending):
|
||||
result = pending[].inFlight
|
||||
pending[].requested = peer.some
|
||||
return true

proc clearRequest*(
    self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
  self.blocks.withValue(address, pending):
    if peer.isSome:
      assert peer == pending[].requested
    pending[].requested = PeerId.none

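The intent of the requested/clearRequest bookkeeping above can be illustrated with a small toy model; this is an illustrative approximation only, not the project's types or exact semantics:

import std/[options, tables]

type PeerId = string # stand-in for the real libp2p PeerId

var requested: Table[string, Option[PeerId]] # block address -> peer it was requested from

# Hypothetical helpers mirroring the idea of markRequested/clearRequest:
# at most one outstanding request per block, cleared once the block arrives.
proc markRequested(address: string, peer: PeerId): bool =
  if requested.getOrDefault(address, none(PeerId)).isSome:
    return false # already requested from some peer
  requested[address] = some(peer)
  true

proc clearRequest(address: string) =
  requested[address] = none(PeerId)

when isMainModule:
  doAssert markRequested("block-1", "peer-A")
  doAssert not markRequested("block-1", "peer-B") # only one request in flight
  clearRequest("block-1")
  doAssert markRequested("block-1", "peer-B") # can be requested again after clearing
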
func contains*(self: PendingBlocksManager, cid: Cid): bool =
|
||||
BlockAddress.init(cid) in self.blocks
|
||||
|
||||
@ -35,15 +35,14 @@ const
|
||||
DefaultMaxInflight* = 100
|
||||
|
||||
type
|
||||
WantListHandler* =
|
||||
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
|
||||
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
|
||||
BlocksDeliveryHandler* =
|
||||
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
|
||||
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
|
||||
BlockPresenceHandler* =
|
||||
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
|
||||
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
|
||||
PaymentHandler* =
|
||||
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
|
||||
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
|
||||
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
|
||||
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
|
||||
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
|
||||
|
||||
BlockExcHandlers* = object
|
||||
onWantList*: WantListHandler
|
||||
@ -51,6 +50,9 @@ type
|
||||
onPresence*: BlockPresenceHandler
|
||||
onAccount*: AccountHandler
|
||||
onPayment*: PaymentHandler
|
||||
onPeerJoined*: PeerEventHandler
|
||||
onPeerDeparted*: PeerEventHandler
|
||||
onPeerDropped*: PeerEventHandler
|
||||
|
||||
WantListSender* = proc(
|
||||
id: PeerId,
|
||||
@ -240,96 +242,116 @@ proc handlePayment(
|
||||
await network.handlers.onPayment(peer.id, payment)
|
||||
|
||||
proc rpcHandler(
|
||||
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
|
||||
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
|
||||
) {.async: (raises: []).} =
|
||||
## handle rpc messages
|
||||
##
|
||||
if msg.wantList.entries.len > 0:
|
||||
b.trackedFutures.track(b.handleWantList(peer, msg.wantList))
|
||||
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
|
||||
|
||||
if msg.payload.len > 0:
|
||||
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload))
|
||||
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
|
||||
|
||||
if msg.blockPresences.len > 0:
|
||||
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences))
|
||||
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
|
||||
|
||||
if account =? Account.init(msg.account):
|
||||
b.trackedFutures.track(b.handleAccount(peer, account))
|
||||
self.trackedFutures.track(self.handleAccount(peer, account))
|
||||
|
||||
if payment =? SignedState.init(msg.payment):
|
||||
b.trackedFutures.track(b.handlePayment(peer, payment))
|
||||
self.trackedFutures.track(self.handlePayment(peer, payment))
|
||||
|
||||
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
|
||||
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
|
||||
## Creates or retrieves a BlockExcNetwork Peer
|
||||
##
|
||||
|
||||
if peer in b.peers:
|
||||
return b.peers.getOrDefault(peer, nil)
|
||||
if peer in self.peers:
|
||||
return self.peers.getOrDefault(peer, nil)
|
||||
|
||||
var getConn: ConnProvider = proc(): Future[Connection] {.
|
||||
async: (raises: [CancelledError])
|
||||
.} =
|
||||
try:
|
||||
trace "Getting new connection stream", peer
|
||||
return await b.switch.dial(peer, Codec)
|
||||
return await self.switch.dial(peer, Codec)
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as exc:
|
||||
trace "Unable to connect to blockexc peer", exc = exc.msg
|
||||
|
||||
if not isNil(b.getConn):
|
||||
getConn = b.getConn
|
||||
if not isNil(self.getConn):
|
||||
getConn = self.getConn
|
||||
|
||||
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
|
||||
await b.rpcHandler(p, msg)
|
||||
await self.rpcHandler(p, msg)
|
||||
|
||||
# create new pubsub peer
|
||||
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
|
||||
debug "Created new blockexc peer", peer
|
||||
|
||||
b.peers[peer] = blockExcPeer
|
||||
self.peers[peer] = blockExcPeer
|
||||
|
||||
return blockExcPeer
|
||||
|
||||
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
|
||||
## Perform initial setup, such as want
|
||||
## list exchange
|
||||
##
|
||||
|
||||
discard b.getOrCreatePeer(peer)
|
||||
|
||||
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
|
||||
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
|
||||
## Dial a peer
|
||||
##
|
||||
|
||||
if b.isSelf(peer.peerId):
|
||||
if self.isSelf(peer.peerId):
|
||||
trace "Skipping dialing self", peer = peer.peerId
|
||||
return
|
||||
|
||||
if peer.peerId in b.peers:
|
||||
if peer.peerId in self.peers:
|
||||
trace "Already connected to peer", peer = peer.peerId
|
||||
return
|
||||
|
||||
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
|
||||
await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
|
||||
|
||||
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
|
||||
proc dropPeer*(
|
||||
self: BlockExcNetwork, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
trace "Dropping peer", peer
|
||||
|
||||
try:
|
||||
if not self.switch.isNil:
|
||||
await self.switch.disconnect(peer)
|
||||
except CatchableError as error:
|
||||
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
|
||||
|
||||
if not self.handlers.onPeerDropped.isNil:
|
||||
await self.handlers.onPeerDropped(peer)
|
||||
|
||||
proc handlePeerJoined*(
|
||||
self: BlockExcNetwork, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
discard self.getOrCreatePeer(peer)
|
||||
if not self.handlers.onPeerJoined.isNil:
|
||||
await self.handlers.onPeerJoined(peer)
|
||||
|
||||
proc handlePeerDeparted*(
|
||||
self: BlockExcNetwork, peer: PeerId
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
## Cleanup disconnected peer
|
||||
##
|
||||
|
||||
trace "Dropping peer", peer
|
||||
b.peers.del(peer)
|
||||
trace "Cleaning up departed peer", peer
|
||||
self.peers.del(peer)
|
||||
if not self.handlers.onPeerDeparted.isNil:
|
||||
await self.handlers.onPeerDeparted(peer)
|
||||
|
||||
method init*(self: BlockExcNetwork) =
|
||||
method init*(self: BlockExcNetwork) {.raises: [].} =
|
||||
## Perform protocol initialization
|
||||
##
|
||||
|
||||
proc peerEventHandler(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
self.setupPeer(peerId)
|
||||
await self.handlePeerJoined(peerId)
|
||||
elif event.kind == PeerEventKind.Left:
|
||||
await self.handlePeerDeparted(peerId)
|
||||
else:
|
||||
self.dropPeer(peerId)
|
||||
warn "Unknown peer event", event
|
||||
|
||||
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
|
||||
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
|
||||
|
||||
@ -24,10 +24,9 @@ logScope:
|
||||
const DefaultYieldInterval = 50.millis
|
||||
|
||||
type
|
||||
ConnProvider* =
|
||||
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
|
||||
ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
|
||||
|
||||
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}
|
||||
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
|
||||
|
||||
NetworkPeer* = ref object of RootObj
|
||||
id*: PeerId
|
||||
@ -65,7 +64,9 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
|
||||
except CatchableError as err:
|
||||
warn "Exception in blockexc read loop", msg = err.msg
|
||||
finally:
|
||||
trace "Detaching read loop", peer = self.id, connId = conn.oid
|
||||
warn "Detaching read loop", peer = self.id, connId = conn.oid
|
||||
if self.sendConn == conn:
|
||||
self.sendConn = nil
|
||||
await conn.close()
|
||||
|
||||
proc connect*(
|
||||
@ -89,7 +90,12 @@ proc send*(
|
||||
return
|
||||
|
||||
trace "Sending message", peer = self.id, connId = conn.oid
|
||||
await conn.writeLp(protobufEncode(msg))
|
||||
try:
|
||||
await conn.writeLp(protobufEncode(msg))
|
||||
except CatchableError as err:
|
||||
if self.sendConn == conn:
|
||||
self.sendConn = nil
|
||||
raise newException(LPStreamError, "Failed to send message: " & err.msg)
|
||||
|
||||
func new*(
|
||||
T: type NetworkPeer,
|
||||
|
||||
@ -25,28 +25,77 @@ import ../../logutils
|
||||
|
||||
export payments, nitro
|
||||
|
||||
const
|
||||
MinRefreshInterval = 1.seconds
|
||||
MaxRefreshBackoff = 36 # 36 seconds
|
||||
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
|
||||
|
||||
type BlockExcPeerCtx* = ref object of RootObj
|
||||
id*: PeerId
|
||||
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
|
||||
peerWants*: seq[WantListEntry] # remote peers want lists
|
||||
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
|
||||
exchanged*: int # times peer has exchanged with us
|
||||
lastExchange*: Moment # last time peer has exchanged with us
|
||||
refreshInProgress*: bool # indicates if a refresh is in progress
|
||||
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
|
||||
refreshBackoff*: int = 1 # backoff factor for refresh requests
|
||||
account*: ?Account # ethereum account of this peer
|
||||
paymentChannel*: ?ChannelId # payment channel id
|
||||
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
|
||||
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
|
||||
lastExchange*: Moment # last time peer has sent us a block
|
||||
activityTimeout*: Duration
|
||||
lastSentWants*: HashSet[BlockAddress]
|
||||
# track what wantList we last sent for delta updates
|
||||
|
||||
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
|
||||
toSeq(self.blocks.keys)
|
||||
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
|
||||
let staleness =
|
||||
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
|
||||
|
||||
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
|
||||
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
|
||||
if staleness and self.refreshInProgress:
|
||||
trace "Cleaning up refresh state", peer = self.id
|
||||
self.refreshInProgress = false
|
||||
self.refreshBackoff = 1
|
||||
|
||||
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
|
||||
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
|
||||
staleness
|
||||
|
||||
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
|
||||
address in self.blocksSent
|
||||
|
||||
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
|
||||
self.blocksSent.incl(address)
|
||||
|
||||
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
|
||||
self.blocksSent.excl(address)

proc refreshRequested*(self: BlockExcPeerCtx) =
  trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
  self.refreshInProgress = true
  self.lastRefresh = Moment.now()

proc refreshReplied*(self: BlockExcPeerCtx) =
  self.refreshInProgress = false
  self.lastRefresh = Moment.now()
  self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)

proc havesUpdated(self: BlockExcPeerCtx) =
  self.refreshBackoff = 1

proc wantsUpdated*(self: BlockExcPeerCtx) =
  self.refreshBackoff = 1

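Given the MinRefreshInterval and MaxRefreshBackoff constants defined earlier in this file, the doubling in refreshReplied gives a refresh interval that grows roughly as in this small sketch (numbers are illustrative):

const
  MinRefreshIntervalSecs = 1 # mirrors MinRefreshInterval = 1.seconds
  MaxRefreshBackoff = 36

when isMainModule:
  var backoff = 1
  for reply in 1 .. 8:
    backoff = min(backoff * 2, MaxRefreshBackoff)
    echo "after reply ", reply, ": refresh at most every ~", backoff * MinRefreshIntervalSecs, "s"
  # -> 2, 4, 8, 16, 32, 36, 36, 36 seconds
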
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
|
||||
  # XXX: this is ugly and inefficient, but since these will typically
  # be used in "joins", it's better to pay the price here and have
  # a linear join than to not do it and have a quadratic join.
  toHashSet(self.blocks.keys.toSeq)

proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
|
||||
address in self.blocks
|
||||
|
||||
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
|
||||
if presence.address notin self.blocks:
|
||||
self.havesUpdated()
|
||||
|
||||
self.blocks[presence.address] = presence
|
||||
|
||||
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
|
||||
@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
|
||||
price += precense[].price
|
||||
|
||||
price
|
||||
|
||||
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
|
||||
## Adds a block to the set of blocks that have been requested to this peer
## (its request schedule).
|
||||
if self.blocksRequested.len == 0:
|
||||
self.lastExchange = Moment.now()
|
||||
self.blocksRequested.incl(address)
|
||||
|
||||
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
|
||||
## Removes a block from the set of blocks that have been requested to this peer
|
||||
## (its request schedule).
|
||||
self.blocksRequested.excl(address)
|
||||
|
||||
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
|
||||
let wasRequested = address in self.blocksRequested
|
||||
self.blocksRequested.excl(address)
|
||||
self.lastExchange = Moment.now()
|
||||
wasRequested
|
||||
|
||||
proc activityTimer*(
    self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
  ## This is called by the block exchange when a block is scheduled for this peer.
  ## If the peer sends no blocks for a while, it is considered inactive/uncooperative
  ## and the peer is dropped. Note that ANY block that the peer sends will reset this
  ## timer for all blocks.
  ##
  while true:
    let idleTime = Moment.now() - self.lastExchange
    if idleTime > self.activityTimeout:
      return

    await sleepAsync(self.activityTimeout - idleTime)

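A simplified, self-contained sketch of the same idea, for intuition only; the real proc lives on BlockExcPeerCtx and is raced by the engine against incoming blocks, and the ptr-based wiring here is an assumption made for the demo:

import pkg/chronos

# Completes once `lastActivity` has not been bumped for `timeout`.
proc inactivityTimer(
    lastActivity: ptr Moment, timeout: Duration
): Future[void] {.async: (raises: [CancelledError]).} =
  while true:
    let idle = Moment.now() - lastActivity[]
    if idle > timeout:
      return
    await sleepAsync(timeout - idle)

when isMainModule:
  var last = Moment.now()
  waitFor inactivityTimer(addr last, 100.millis) # returns after ~100 ms without activity
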
@ -62,21 +62,23 @@ func len*(self: PeerCtxStore): int =
|
||||
self.peers.len
|
||||
|
||||
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
  toSeq(self.peers.values).filterIt(address in it.peerHave)

func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  # FIXME: this is way slower and can end up leading to unexpected performance loss.
  toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))

func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
  toSeq(self.peers.values).filterIt(address in it.wantedBlocks)

func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
  toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
  # FIXME: this is way slower and can end up leading to unexpected performance loss.
  toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))

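The switch from anyIt scans to `in` lookups above trades a linear pass over a seq for HashSet membership; a tiny illustration with made-up data:

import std/[sequtils, sets]

when isMainModule:
  let
    wantedSeq = toSeq(1 .. 1000)
    wantedSet = wantedSeq.toHashSet

  doAssert wantedSeq.anyIt(it == 999) # O(n) scan over the seq
  doAssert 999 in wantedSet # amortized O(1) membership test
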
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
|
||||
var res: PeersForBlock = (@[], @[])
|
||||
for peer in self:
|
||||
if peer.peerHave.anyIt(it == address):
|
||||
if address in peer:
|
||||
res.with.add(peer)
|
||||
else:
|
||||
res.without.add(peer)
|
||||
|
||||
@ -9,7 +9,6 @@
|
||||
|
||||
import std/hashes
|
||||
import std/sequtils
|
||||
import pkg/stew/endians2
|
||||
|
||||
import message
|
||||
|
||||
@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
|
||||
export BlockDelivery, BlockPresenceType, BlockPresence
|
||||
export AccountMessage, StateChannelUpdate
|
||||
|
||||
proc hash*(a: BlockAddress): Hash =
|
||||
if a.leaf:
|
||||
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
|
||||
hash(data)
|
||||
else:
|
||||
hash(a.cid.data.buffer)
|
||||
|
||||
proc hash*(e: WantListEntry): Hash =
|
||||
hash(e.address)
|
||||
|
||||
|
||||
@ -25,11 +25,15 @@ type
|
||||
|
||||
WantListEntry* = object
|
||||
address*: BlockAddress
|
||||
# XXX: I think explicit priority is pointless as the peer will request
|
||||
# the blocks in the order it wants to receive them, and all we have to
|
||||
# do is process those in the same order as we send them back. It also
|
||||
# complicates things for no reason at the moment, as the priority is
|
||||
# always set to 0.
|
||||
priority*: int32 # The priority (normalized). default to 1
|
||||
cancel*: bool # Whether this revokes an entry
|
||||
wantType*: WantType # Note: defaults to enum 0, ie Block
|
||||
sendDontHave*: bool # Note: defaults to false
|
||||
inFlight*: bool # Whether block sending is in progress. Not serialized.
|
||||
|
||||
WantList* = object
|
||||
entries*: seq[WantListEntry] # A list of wantList entries
|
||||
|
||||
@ -9,16 +9,14 @@
|
||||
|
||||
import std/tables
|
||||
import std/sugar
|
||||
import std/hashes
|
||||
|
||||
export tables
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/libp2p/[cid, multicodec, multihash]
|
||||
import pkg/stew/byteutils
|
||||
import pkg/stew/[byteutils, endians2]
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
|
||||
@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
|
||||
else:
|
||||
"cid: " & $a.cid
|
||||
|
||||
proc hash*(a: BlockAddress): Hash =
|
||||
if a.leaf:
|
||||
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
|
||||
hash(data)
|
||||
else:
|
||||
hash(a.cid.data.buffer)
|
||||
|
||||
proc cidOrTreeCid*(a: BlockAddress): Cid =
|
||||
if a.leaf: a.treeCid else: a.cid
|
||||
|
||||
|
||||
@ -9,10 +9,7 @@
|
||||
|
||||
# TODO: This is super inefficient and needs a rewrite, but it'll do for now
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
@ -31,7 +28,7 @@ type
|
||||
ChunkerError* = object of CatchableError
|
||||
ChunkBuffer* = ptr UncheckedArray[byte]
|
||||
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
|
||||
gcsafe, async: (raises: [ChunkerError, CancelledError])
|
||||
async: (raises: [ChunkerError, CancelledError])
|
||||
.}
|
||||
|
||||
# Reader that splits input data into fixed-size chunks
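
For intuition, here is a hypothetical in-memory implementation of the Reader callback shape declared above; ChunkerError and ChunkBuffer are re-declared locally so the sketch stands alone, and none of this is the project's code:

import pkg/chronos

type
  ChunkerError = object of CatchableError
  ChunkBuffer = ptr UncheckedArray[byte]

# Returns a reader that copies up to `len` bytes of the remaining content
# into `data` and reports how many bytes were written (0 means end of input).
proc makeStringReader(content: string): auto =
  var
    buf = content
    pos = 0
  return proc(
      data: ChunkBuffer, len: int
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    let n = min(len, buf.len - pos)
    if n > 0:
      copyMem(addr data[0], addr buf[pos], n)
      pos += n
    return n
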
@ -77,7 +74,7 @@ proc new*(
|
||||
|
||||
proc reader(
|
||||
data: ChunkBuffer, len: int
|
||||
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
|
||||
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
|
||||
var res = 0
|
||||
try:
|
||||
while res < len:
|
||||
@ -105,7 +102,7 @@ proc new*(
|
||||
|
||||
proc reader(
|
||||
data: ChunkBuffer, len: int
|
||||
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
|
||||
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
|
||||
var total = 0
|
||||
try:
|
||||
while total < len:
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/stew/endians2
|
||||
import pkg/upraises
|
||||
import pkg/stint
|
||||
|
||||
type
|
||||
@ -8,10 +9,12 @@ type
|
||||
SecondsSince1970* = int64
|
||||
Timeout* = object of CatchableError
|
||||
|
||||
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} =
|
||||
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
|
||||
raiseAssert "not implemented"
|
||||
|
||||
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
|
||||
method waitUntil*(
|
||||
clock: Clock, time: SecondsSince1970
|
||||
) {.base, async: (raises: [CancelledError]).} =
|
||||
raiseAssert "not implemented"
|
||||
|
||||
method start*(clock: Clock) {.base, async.} =
|
||||
|
||||
@ -12,6 +12,7 @@ import std/strutils
|
||||
import std/os
|
||||
import std/tables
|
||||
import std/cpuinfo
|
||||
import std/net
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/taskpools
|
||||
@ -21,7 +22,6 @@ import pkg/confutils
|
||||
import pkg/confutils/defs
|
||||
import pkg/nitro
|
||||
import pkg/stew/io2
|
||||
import pkg/stew/shims/net as stewnet
|
||||
import pkg/datastore
|
||||
import pkg/ethers except Rng
|
||||
import pkg/stew/io2
|
||||
@ -56,11 +56,21 @@ type
|
||||
codexNode: CodexNodeRef
|
||||
repoStore: RepoStore
|
||||
maintenance: BlockMaintainer
|
||||
taskPool: Taskpool
|
||||
taskpool: Taskpool
|
||||
isStarted: bool
|
||||
|
||||
CodexPrivateKey* = libp2p.PrivateKey # alias
|
||||
EthWallet = ethers.Wallet
|
||||
|
||||
func config*(self: CodexServer): CodexConf =
|
||||
return self.config
|
||||
|
||||
func node*(self: CodexServer): CodexNodeRef =
|
||||
return self.codexNode
|
||||
|
||||
func repoStore*(self: CodexServer): RepoStore =
|
||||
return self.repoStore
|
||||
|
||||
proc waitForSync(provider: Provider): Future[void] {.async.} =
|
||||
var sleepTime = 1
|
||||
trace "Checking sync state of Ethereum provider..."
|
||||
@ -159,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
|
||||
s.codexNode.contracts = (client, host, validator)
|
||||
|
||||
proc start*(s: CodexServer) {.async.} =
|
||||
trace "Starting codex node", config = $s.config
|
||||
if s.isStarted:
|
||||
warn "Codex server already started, skipping"
|
||||
return
|
||||
|
||||
trace "Starting codex node", config = $s.config
|
||||
await s.repoStore.start()
|
||||
|
||||
s.maintenance.start()
|
||||
|
||||
await s.codexNode.switch.start()
|
||||
@ -175,27 +189,55 @@ proc start*(s: CodexServer) {.async.} =
|
||||
|
||||
await s.bootstrapInteractions()
|
||||
await s.codexNode.start()
|
||||
s.restServer.start()
|
||||
|
||||
if s.restServer != nil:
|
||||
s.restServer.start()
|
||||
|
||||
s.isStarted = true
|
||||
|
||||
proc stop*(s: CodexServer) {.async.} =
|
||||
if not s.isStarted:
|
||||
warn "Codex is not started"
|
||||
return
|
||||
|
||||
notice "Stopping codex node"
|
||||
|
||||
let res = await noCancel allFinishedFailed[void](
|
||||
var futures =
|
||||
@[
|
||||
s.restServer.stop(),
|
||||
s.codexNode.switch.stop(),
|
||||
s.codexNode.stop(),
|
||||
s.repoStore.stop(),
|
||||
s.maintenance.stop(),
|
||||
]
|
||||
)
|
||||
|
||||
if s.restServer != nil:
|
||||
futures.add(s.restServer.stop())
|
||||
|
||||
let res = await noCancel allFinishedFailed[void](futures)
|
||||
|
||||
if res.failure.len > 0:
|
||||
error "Failed to stop codex node", failures = res.failure.len
|
||||
raiseAssert "Failed to stop codex node"
|
||||
|
||||
if not s.taskPool.isNil:
|
||||
s.taskPool.shutdown()
|
||||
proc close*(s: CodexServer) {.async.} =
|
||||
var futures = @[s.codexNode.close(), s.repoStore.close()]
|
||||
|
||||
let res = await noCancel allFinishedFailed[void](futures)
|
||||
|
||||
if not s.taskpool.isNil:
|
||||
try:
|
||||
s.taskpool.shutdown()
|
||||
except Exception as exc:
|
||||
error "Failed to stop the taskpool", failures = res.failure.len
|
||||
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
|
||||
|
||||
if res.failure.len > 0:
|
||||
error "Failed to close codex node", failures = res.failure.len
|
||||
raiseAssert "Failed to close codex node"
|
||||
|
||||
proc shutdown*(server: CodexServer) {.async.} =
|
||||
await server.stop()
|
||||
await server.close()
|
||||
|
||||
proc new*(
|
||||
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
|
||||
@ -211,7 +253,7 @@ proc new*(
|
||||
.withMaxConnections(config.maxPeers)
|
||||
.withAgentVersion(config.agentString)
|
||||
.withSignedPeerRecord(true)
|
||||
.withTcpTransport({ServerFlags.ReuseAddr})
|
||||
.withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
|
||||
.build()
|
||||
|
||||
var
|
||||
@ -295,7 +337,7 @@ proc new*(
|
||||
)
|
||||
|
||||
peerStore = PeerCtxStore.new()
|
||||
pendingBlocks = PendingBlocksManager.new()
|
||||
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
|
||||
advertiser = Advertiser.new(repoStore, discovery)
|
||||
blockDiscovery =
|
||||
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
|
||||
@ -320,10 +362,13 @@ proc new*(
|
||||
taskPool = taskPool,
|
||||
)
|
||||
|
||||
var restServer: RestServerRef = nil
|
||||
|
||||
if config.apiBindAddress.isSome:
|
||||
restServer = RestServerRef
|
||||
.new(
|
||||
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
|
||||
initTAddress(config.apiBindAddress, config.apiPort),
|
||||
initTAddress(config.apiBindAddress.get(), config.apiPort),
|
||||
bufferSize = (1024 * 64),
|
||||
maxRequestBodySize = int.high,
|
||||
)
|
||||
|
||||
138
codex/conf.nim
@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
|
||||
{.pop.}
|
||||
|
||||
import std/options
|
||||
import std/parseutils
|
||||
import std/strutils
|
||||
import std/typetraits
|
||||
import std/net
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles/helpers
|
||||
@ -27,13 +29,12 @@ import pkg/confutils/std/net
|
||||
import pkg/toml_serialization
|
||||
import pkg/metrics
|
||||
import pkg/metrics/chronos_httpserver
|
||||
import pkg/stew/shims/net as stewnet
|
||||
import pkg/stew/shims/parseutils
|
||||
import pkg/stew/byteutils
|
||||
import pkg/libp2p
|
||||
import pkg/ethers
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/base64
|
||||
|
||||
import ./codextypes
|
||||
import ./discovery
|
||||
@ -46,13 +47,14 @@ import ./utils/natutils
|
||||
|
||||
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
|
||||
from ./validationconfig import MaxSlots, ValidationGroups
|
||||
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
|
||||
|
||||
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
|
||||
export ValidationGroups, MaxSlots
|
||||
|
||||
export
|
||||
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
|
||||
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
|
||||
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
|
||||
|
||||
type ThreadCount* = distinct Natural
|
||||
|
||||
@ -202,8 +204,10 @@ type
|
||||
.}: string
|
||||
|
||||
apiBindAddress* {.
|
||||
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
|
||||
.}: string
|
||||
desc: "The REST API bind address",
|
||||
defaultValue: "127.0.0.1".some,
|
||||
name: "api-bindaddr"
|
||||
.}: Option[string]
|
||||
|
||||
apiPort* {.
|
||||
desc: "The REST Api port",
|
||||
@ -261,6 +265,13 @@ type
|
||||
name: "block-mn"
|
||||
.}: int
|
||||
|
||||
blockRetries* {.
|
||||
desc: "Number of times to retry fetching a block before giving up",
|
||||
defaultValue: DefaultBlockRetries,
|
||||
defaultValueDesc: $DefaultBlockRetries,
|
||||
name: "block-retries"
|
||||
.}: int
|
||||
|
||||
cacheSize* {.
|
||||
desc:
|
||||
"The size of the block cache, 0 disables the cache - " &
|
||||
@ -474,7 +485,7 @@ func prover*(self: CodexConf): bool =
|
||||
self.persistence and self.persistenceCmd == PersistenceCmd.prover
|
||||
|
||||
proc getCodexVersion(): string =
|
||||
let tag = strip(staticExec("git tag"))
|
||||
let tag = strip(staticExec("git describe --tags --abbrev=0"))
|
||||
if tag.isEmptyOrWhitespace:
|
||||
return "untagged build"
|
||||
return tag
|
||||
@ -503,62 +514,80 @@ const
|
||||
|
||||
proc parseCmdArg*(
|
||||
T: typedesc[MultiAddress], input: string
|
||||
): MultiAddress {.upraises: [ValueError].} =
|
||||
): MultiAddress {.raises: [ValueError].} =
|
||||
var ma: MultiAddress
|
||||
try:
|
||||
let res = MultiAddress.init(input)
|
||||
if res.isOk:
|
||||
ma = res.get()
|
||||
else:
|
||||
warn "Invalid MultiAddress", input = input, error = res.error()
|
||||
fatal "Invalid MultiAddress", input = input, error = res.error()
|
||||
quit QuitFailure
|
||||
except LPError as exc:
|
||||
warn "Invalid MultiAddress uri", uri = input, error = exc.msg
|
||||
fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
|
||||
quit QuitFailure
|
||||
ma
|
||||
|
||||
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} =
|
||||
let count = parseInt(input)
|
||||
if count != 0 and count < 2:
|
||||
warn "Invalid number of threads", input = input
|
||||
quit QuitFailure
|
||||
ThreadCount(count)
|
||||
proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
  try:
    let count = parseInt(p)
    if count != 0 and count < 2:
      return err("Invalid number of threads: " & p)
    return ok(ThreadCount(count))
  except ValueError as e:
    return err("Invalid number of threads: " & p & ", error=" & e.msg)

proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
proc parseCmdArg*(T: type ThreadCount, input: string): T =
  let val = ThreadCount.parse(input)
  if val.isErr:
    fatal "Cannot parse the thread count.", input = input, error = val.error()
    quit QuitFailure
  return val.get()

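The same parse/parseCmdArg split can be reused for other option types; a hypothetical example for a PortNumber type (the type, the messages and the echo-based error reporting are illustrative, the real code uses chronicles' fatal):

import std/strutils
import pkg/results # nim-results, assumed available as a dependency

type PortNumber = distinct uint16

# parse reports failures through a Result instead of quitting...
proc parse(T: type PortNumber, p: string): Result[PortNumber, string] =
  try:
    let port = parseInt(p)
    if port < 1 or port > 65535:
      return err("Port out of range: " & p)
    return ok(PortNumber(port.uint16))
  except ValueError as e:
    return err("Invalid port: " & p & ", error=" & e.msg)

# ...while parseCmdArg turns a failure into a fatal exit, as above.
proc parseCmdArg(T: type PortNumber, input: string): PortNumber =
  let res = PortNumber.parse(input)
  if res.isErr:
    echo "Cannot parse the port: ", res.error()
    quit QuitFailure
  return res.get()
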
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
|
||||
var res: SignedPeerRecord
|
||||
try:
|
||||
if not res.fromURI(uri):
|
||||
warn "Invalid SignedPeerRecord uri", uri = uri
|
||||
quit QuitFailure
|
||||
except LPError as exc:
|
||||
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
|
||||
quit QuitFailure
|
||||
except CatchableError as exc:
|
||||
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
|
||||
quit QuitFailure
|
||||
res
|
||||
if not res.fromURI(p):
|
||||
return err("The uri is not a valid SignedPeerRecord: " & p)
|
||||
return ok(res)
|
||||
except LPError, Base64Error:
|
||||
let e = getCurrentException()
|
||||
return err(e.msg)
|
||||
|
||||
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
|
||||
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
|
||||
let res = SignedPeerRecord.parse(uri)
|
||||
if res.isErr:
|
||||
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
|
||||
quit QuitFailure
|
||||
return res.get()
|
||||
|
||||
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
|
||||
case p.toLowerAscii
|
||||
of "any":
|
||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
|
||||
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
|
||||
of "none":
|
||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
|
||||
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
|
||||
of "upnp":
|
||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
|
||||
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
|
||||
of "pmp":
|
||||
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
|
||||
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
|
||||
else:
|
||||
if p.startsWith("extip:"):
|
||||
try:
|
||||
let ip = parseIpAddress(p[6 ..^ 1])
|
||||
NatConfig(hasExtIp: true, extIp: ip)
|
||||
return ok(NatConfig(hasExtIp: true, extIp: ip))
|
||||
except ValueError:
|
||||
let error = "Not a valid IP address: " & p[6 ..^ 1]
|
||||
raise newException(ValueError, error)
|
||||
return err(error)
|
||||
else:
|
||||
let error = "Not a valid NAT option: " & p
|
||||
raise newException(ValueError, error)
|
||||
return err("Not a valid NAT option: " & p)
|
||||
|
||||
proc parseCmdArg*(T: type NatConfig, p: string): T =
|
||||
let res = NatConfig.parse(p)
|
||||
if res.isErr:
|
||||
fatal "Cannot parse the NAT config.", error = res.error(), input = p
|
||||
quit QuitFailure
|
||||
return res.get()
|
||||
|
||||
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
|
||||
return @[]
|
||||
@ -566,25 +595,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
|
||||
proc parseCmdArg*(T: type EthAddress, address: string): T =
|
||||
EthAddress.init($address).get()
|
||||
|
||||
proc parseCmdArg*(T: type NBytes, val: string): T =
|
||||
func parse*(T: type NBytes, p: string): Result[NBytes, string] =
|
||||
var num = 0'i64
|
||||
let count = parseSize(val, num, alwaysBin = true)
|
||||
let count = parseSize(p, num, alwaysBin = true)
|
||||
if count == 0:
|
||||
warn "Invalid number of bytes", nbytes = val
|
||||
return err("Invalid number of bytes: " & p)
|
||||
return ok(NBytes(num))
|
||||
|
||||
proc parseCmdArg*(T: type NBytes, val: string): T =
|
||||
let res = NBytes.parse(val)
|
||||
if res.isErr:
|
||||
fatal "Cannot parse NBytes.", error = res.error(), input = val
|
||||
quit QuitFailure
|
||||
NBytes(num)
|
||||
return res.get()
|
||||
|
||||
proc parseCmdArg*(T: type Duration, val: string): T =
|
||||
var dur: Duration
|
||||
let count = parseDuration(val, dur)
|
||||
if count == 0:
|
||||
warn "Cannot parse duration", dur = dur
|
||||
fatal "Cannot parse duration", dur = dur
|
||||
quit QuitFailure
|
||||
dur
|
||||
|
||||
proc readValue*(
|
||||
r: var TomlReader, val: var EthAddress
|
||||
) {.upraises: [SerializationError, IOError].} =
|
||||
) {.raises: [SerializationError, IOError].} =
|
||||
val = EthAddress.init(r.readValue(string)).get()
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
|
||||
@ -595,7 +630,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
|
||||
try:
|
||||
val = SignedPeerRecord.parseCmdArg(uri)
|
||||
except LPError as err:
|
||||
warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
|
||||
fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
|
||||
quit QuitFailure
|
||||
|
||||
proc readValue*(r: var TomlReader, val: var MultiAddress) =
|
||||
@ -607,12 +642,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
|
||||
if res.isOk:
|
||||
val = res.get()
|
||||
else:
|
||||
warn "Invalid MultiAddress", input = input, error = res.error()
|
||||
fatal "Invalid MultiAddress", input = input, error = res.error()
|
||||
quit QuitFailure
|
||||
|
||||
proc readValue*(
|
||||
r: var TomlReader, val: var NBytes
|
||||
) {.upraises: [SerializationError, IOError].} =
|
||||
) {.raises: [SerializationError, IOError].} =
|
||||
var value = 0'i64
|
||||
var str = r.readValue(string)
|
||||
let count = parseSize(str, value, alwaysBin = true)
|
||||
@ -623,7 +658,7 @@ proc readValue*(
|
||||
|
||||
proc readValue*(
|
||||
r: var TomlReader, val: var ThreadCount
|
||||
) {.upraises: [SerializationError, IOError].} =
|
||||
) {.raises: [SerializationError, IOError].} =
|
||||
var str = r.readValue(string)
|
||||
try:
|
||||
val = parseCmdArg(ThreadCount, str)
|
||||
@ -632,7 +667,7 @@ proc readValue*(
|
||||
|
||||
proc readValue*(
|
||||
r: var TomlReader, val: var Duration
|
||||
) {.upraises: [SerializationError, IOError].} =
|
||||
) {.raises: [SerializationError, IOError].} =
|
||||
var str = r.readValue(string)
|
||||
var dur: Duration
|
||||
let count = parseDuration(str, dur)
|
||||
@ -699,7 +734,7 @@ proc stripAnsi*(v: string): string =
|
||||
|
||||
res
|
||||
|
||||
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
|
||||
proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
|
||||
# Updates log levels (without clearing old ones)
|
||||
let directives = logLevel.split(";")
|
||||
try:
|
||||
@ -779,15 +814,6 @@ proc setupLogging*(conf: CodexConf) =
|
||||
else:
|
||||
defaultChroniclesStream.outputs[0].writer = writer
|
||||
|
||||
try:
|
||||
updateLogLevel(conf.logLevel)
|
||||
except ValueError as err:
|
||||
try:
|
||||
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
|
||||
except IOError:
|
||||
echo "Invalid value for --log-level. " & err.msg
|
||||
quit QuitFailure
|
||||
|
||||
proc setupMetrics*(config: CodexConf) =
|
||||
if config.metricsEnabled:
|
||||
let metricsAddress = config.metricsAddress
|
||||
|
||||
8
codex/contentids_exts.nim
Normal file
@ -0,0 +1,8 @@
|
||||
const ContentIdsExts = [
|
||||
multiCodec("codex-root"),
|
||||
multiCodec("codex-manifest"),
|
||||
multiCodec("codex-block"),
|
||||
multiCodec("codex-slot-root"),
|
||||
multiCodec("codex-proving-root"),
|
||||
multiCodec("codex-slot-cell"),
|
||||
]
|
||||
@ -1,3 +1,5 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/times
|
||||
import pkg/ethers
|
||||
import pkg/questionable
|
||||
@ -72,7 +74,9 @@ method now*(clock: OnChainClock): SecondsSince1970 =
|
||||
doAssert clock.started, "clock should be started before calling now()"
|
||||
return toUnix(getTime() + clock.offset)
|
||||
|
||||
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
method waitUntil*(
    clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
  while (let difference = time - clock.now(); difference > 0):
    clock.newBlock.clear()
    discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

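A stripped-down sketch of the wait loop above, with the clock and wake-up event passed in explicitly; this is a simplified illustration, not the OnChainClock implementation:

import pkg/chronos

# Re-check the remaining time whenever the wake-up event fires or the
# remaining interval elapses, whichever comes first.
proc waitUntilSketch(
    now: proc(): int64 {.gcsafe, raises: [].}, wakeUp: AsyncEvent, time: int64
) {.async: (raises: [CancelledError]).} =
  while (let difference = time - now(); difference > 0):
    wakeUp.clear()
    discard await wakeUp.wait().withTimeout(chronos.seconds(difference))
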
@ -1,7 +1,6 @@
|
||||
import std/strformat
|
||||
import std/strutils
|
||||
import pkg/ethers
|
||||
import pkg/upraises
|
||||
import pkg/questionable
|
||||
import pkg/lrucache
|
||||
import ../utils/exceptions
|
||||
@ -436,7 +435,7 @@ method canReserveSlot*(
|
||||
method subscribeRequests*(
|
||||
market: OnChainMarket, callback: OnRequest
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in Request subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -450,7 +449,7 @@ method subscribeRequests*(
|
||||
method subscribeSlotFilled*(
|
||||
market: OnChainMarket, callback: OnSlotFilled
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in SlotFilled subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -477,7 +476,7 @@ method subscribeSlotFilled*(
|
||||
method subscribeSlotFreed*(
|
||||
market: OnChainMarket, callback: OnSlotFreed
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in SlotFreed subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -491,7 +490,7 @@ method subscribeSlotFreed*(
|
||||
method subscribeSlotReservationsFull*(
|
||||
market: OnChainMarket, callback: OnSlotReservationsFull
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in SlotReservationsFull subscription",
|
||||
msg = eventErr.msg
|
||||
@ -506,7 +505,7 @@ method subscribeSlotReservationsFull*(
|
||||
method subscribeFulfillment(
|
||||
market: OnChainMarket, callback: OnFulfillment
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -520,7 +519,7 @@ method subscribeFulfillment(
|
||||
method subscribeFulfillment(
|
||||
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -535,7 +534,7 @@ method subscribeFulfillment(
|
||||
method subscribeRequestCancelled*(
|
||||
market: OnChainMarket, callback: OnRequestCancelled
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -549,7 +548,7 @@ method subscribeRequestCancelled*(
|
||||
method subscribeRequestCancelled*(
|
||||
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -564,7 +563,7 @@ method subscribeRequestCancelled*(
|
||||
method subscribeRequestFailed*(
|
||||
market: OnChainMarket, callback: OnRequestFailed
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestFailed subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -578,7 +577,7 @@ method subscribeRequestFailed*(
|
||||
method subscribeRequestFailed*(
|
||||
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestFailed subscription", msg = eventErr.msg
|
||||
return
|
||||
@ -593,7 +592,7 @@ method subscribeRequestFailed*(
|
||||
method subscribeProofSubmission*(
|
||||
market: OnChainMarket, callback: OnProofSubmitted
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
|
||||
proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
@ -2,7 +2,7 @@ import std/hashes
|
||||
import std/sequtils
|
||||
import std/typetraits
|
||||
import pkg/contractabi
|
||||
import pkg/nimcrypto
|
||||
import pkg/nimcrypto/keccak
|
||||
import pkg/ethers/contracts/fields
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/byteutils
|
||||
|
||||
@ -10,13 +10,13 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/algorithm
|
||||
import std/net
|
||||
import std/sequtils
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/shims/net
|
||||
import pkg/contractabi/address as ca
|
||||
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
|
||||
from pkg/nimcrypto import keccak256
|
||||
@ -43,6 +43,7 @@ type Discovery* = ref object of RootObj
|
||||
# record to advertise node connection information; this carries any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
isStarted: bool
|
||||
|
||||
proc toNodeId*(cid: Cid): NodeId =
|
||||
## Cid to discovery id
|
||||
@ -157,7 +158,7 @@ method provide*(
|
||||
|
||||
method removeProvider*(
|
||||
d: Discovery, peerId: PeerId
|
||||
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
|
||||
): Future[void] {.base, async: (raises: [CancelledError]).} =
|
||||
## Remove provider from providers table
|
||||
##
|
||||
|
||||
@ -203,10 +204,15 @@ proc start*(d: Discovery) {.async: (raises: []).} =
|
||||
try:
|
||||
d.protocol.open()
|
||||
await d.protocol.start()
|
||||
d.isStarted = true
|
||||
except CatchableError as exc:
|
||||
error "Error starting discovery", exc = exc.msg
|
||||
|
||||
proc stop*(d: Discovery) {.async: (raises: []).} =
|
||||
if not d.isStarted:
|
||||
warn "Discovery not started, skipping stop"
|
||||
return
|
||||
|
||||
try:
|
||||
await noCancel d.protocol.closeWait()
|
||||
except CatchableError as exc:
|
||||
|
||||
@ -7,10 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import ../stores
|
||||
|
||||
|
||||
@ -7,10 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/[sugar, atomics, sequtils]
|
||||
|
||||
|
||||
@ -9,11 +9,9 @@
|
||||
|
||||
# This module implements serialization and deserialization of Manifest
|
||||
|
||||
import pkg/upraises
|
||||
import times
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/tables
|
||||
import std/sequtils
|
||||
|
||||
@ -9,10 +9,7 @@
|
||||
|
||||
# This module defines all operations on Manifest
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/libp2p/protobuf/minprotobuf
|
||||
import pkg/libp2p/[cid, multihash, multicodec]
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
import pkg/chronos
|
||||
import pkg/upraises
|
||||
import pkg/questionable
|
||||
import pkg/ethers/erc20
|
||||
import ./contracts/requests
|
||||
@ -23,15 +22,15 @@ type
|
||||
ProofInvalidError* = object of MarketError
|
||||
Subscription* = ref object of RootObj
|
||||
OnRequest* =
|
||||
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
|
||||
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
|
||||
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
|
||||
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
|
||||
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
|
||||
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
|
||||
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
|
||||
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
|
||||
OnSlotReservationsFull* =
|
||||
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
|
||||
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
|
||||
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
|
||||
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
|
||||
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
|
||||
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
|
||||
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
|
||||
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
|
||||
ProofChallenge* = array[32, byte]
|
||||
|
||||
# Marketplace events -- located here due to the Market abstraction
|
||||
@ -275,7 +274,7 @@ method subscribeProofSubmission*(
|
||||
): Future[Subscription] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
|
||||
method unsubscribe*(subscription: Subscription) {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method queryPastSlotFilledEvents*(
|
||||
|
||||
@ -7,10 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/questionable
|
||||
|
||||
@ -21,12 +21,10 @@ import pkg/chronos/threadsync
|
||||
import ../../utils
|
||||
import ../../rng
|
||||
import ../../errors
|
||||
import ../../blocktype
|
||||
import ../../codextypes
|
||||
|
||||
from ../../utils/digest import digestBytes
|
||||
|
||||
import ../../utils/uniqueptr
|
||||
|
||||
import ../merkletree
|
||||
|
||||
export merkletree
|
||||
@ -45,36 +43,12 @@ type
|
||||
ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
|
||||
ByteProof* = MerkleProof[ByteHash, ByteTreeKey]
|
||||
|
||||
CodexTreeTask* = MerkleTask[ByteHash, ByteTreeKey]
|
||||
|
||||
CodexTree* = ref object of ByteTree
|
||||
mcodec*: MultiCodec
|
||||
|
||||
CodexProof* = ref object of ByteProof
|
||||
mcodec*: MultiCodec
|
||||
|
||||
# CodeHashes is not exported from libp2p
# so we need to recreate it here instead
|
||||
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
|
||||
for item in HashesList:
|
||||
result[item.mcodec] = item
|
||||
|
||||
const CodeHashes = initMultiHashCodeTable()
|
||||
|
||||
func mhash*(mcodec: MultiCodec): ?!MHash =
|
||||
let mhash = CodeHashes.getOrDefault(mcodec)
|
||||
|
||||
if isNil(mhash.coder):
|
||||
return failure "Invalid multihash codec"
|
||||
|
||||
success mhash
|
||||
|
||||
func digestSize*(self: (CodexTree or CodexProof)): int =
|
||||
## Number of leaves
|
||||
##
|
||||
|
||||
self.mhash.size
|
||||
|
||||
func getProof*(self: CodexTree, index: int): ?!CodexProof =
|
||||
var proof = CodexProof(mcodec: self.mcodec)
|
||||
|
||||
@ -134,84 +108,45 @@ proc `$`*(self: CodexProof): string =
|
||||
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
|
||||
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
|
||||
|
||||
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
|
||||
func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
|
||||
## Compress two hashes
|
||||
##
|
||||
|
||||
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
|
||||
# See: https://github.com/codex-storage/nim-codex/issues/1162
|
||||
|
||||
let input = @x & @y & @[key.byte]
|
||||
var digest = hashes.sha256.hash(input)
|
||||
let digest = ?MultiHash.digest(codec, input).mapFailure
|
||||
success digest.digestBytes
|
||||
|
||||
success @digest
|
||||
|
||||
func init*(
|
||||
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
|
||||
): ?!CodexTree =
|
||||
func initTree(mcodec: MultiCodec, leaves: openArray[ByteHash]): ?!CodexTree =
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
let
|
||||
mhash = ?mcodec.mhash()
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
Zero: ByteHash = newSeq[byte](mhash.size)
|
||||
compress(x, y, key, mcodec)
|
||||
digestSize = ?mcodec.digestSize.mapFailure
|
||||
Zero: ByteHash = newSeq[byte](digestSize)
|
||||
|
||||
if mhash.size != leaves[0].len:
|
||||
if digestSize != leaves[0].len:
|
||||
return failure "Invalid hash length"
|
||||
|
||||
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
|
||||
|
||||
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
|
||||
var self = CodexTree(mcodec: mcodec)
|
||||
?self.prepare(compressor, Zero, leaves)
|
||||
success self
|
||||
|
||||
func init*(
|
||||
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
|
||||
): ?!CodexTree =
|
||||
let tree = ?initTree(mcodec, leaves)
|
||||
?tree.compute()
|
||||
success tree
|
||||
|
||||
proc init*(
|
||||
_: type CodexTree,
|
||||
tp: Taskpool,
|
||||
mcodec: MultiCodec = Sha256HashCodec,
|
||||
leaves: seq[ByteHash],
|
||||
): Future[?!CodexTree] {.async: (raises: [CancelledError]).} =
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
let
|
||||
mhash = ?mcodec.mhash()
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
Zero: ByteHash = newSeq[byte](mhash.size)
|
||||
|
||||
if mhash.size != leaves[0].len:
|
||||
return failure "Invalid hash length"
|
||||
|
||||
without signal =? ThreadSignalPtr.new():
|
||||
return failure("Unable to create thread signal")
|
||||
|
||||
defer:
|
||||
signal.close().expect("closing once works")
|
||||
|
||||
var tree = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec)
|
||||
|
||||
var task =
|
||||
CodexTreeTask(tree: cast[ptr ByteTree](addr tree), leaves: leaves, signal: signal)
|
||||
|
||||
doAssert tp.numThreads > 1,
|
||||
"Must have at least one separate thread or signal will never be fired"
|
||||
|
||||
tp.spawn merkleTreeWorker(addr task)
|
||||
|
||||
let threadFut = signal.wait()
|
||||
|
||||
if err =? catch(await threadFut.join()).errorOption:
|
||||
?catch(await noCancel threadFut)
|
||||
if err of CancelledError:
|
||||
raise (ref CancelledError) err
|
||||
|
||||
if not task.success.load():
|
||||
return failure("merkle tree task failed")
|
||||
|
||||
tree.layers = extractValue(task.layers)
|
||||
|
||||
let tree = ?initTree(mcodec, leaves)
|
||||
?await tree.compute(tp)
|
||||
success tree
|
||||
|
||||
func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
|
||||
@ -268,23 +203,16 @@ proc fromNodes*(
|
||||
return failure "Empty nodes"
|
||||
|
||||
let
|
||||
mhash = ?mcodec.mhash()
|
||||
Zero = newSeq[byte](mhash.size)
|
||||
digestSize = ?mcodec.digestSize.mapFailure
|
||||
Zero = newSeq[byte](digestSize)
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
compress(x, y, key, mcodec)
|
||||
|
||||
if mhash.size != nodes[0].len:
|
||||
if digestSize != nodes[0].len:
|
||||
return failure "Invalid hash length"
|
||||
|
||||
var
|
||||
self = CodexTree(compress: compressor, zero: Zero, mcodec: mcodec)
|
||||
layer = nleaves
|
||||
pos = 0
|
||||
|
||||
while pos < nodes.len:
|
||||
self.layers.add(nodes[pos ..< (pos + layer)])
|
||||
pos += layer
|
||||
layer = divUp(layer, 2)
|
||||
var self = CodexTree(mcodec: mcodec)
|
||||
?self.fromNodes(compressor, Zero, nodes, nleaves)
|
||||
|
||||
let
|
||||
index = Rng.instance.rand(nleaves - 1)
|
||||
@ -306,10 +234,10 @@ func init*(
|
||||
return failure "Empty nodes"
|
||||
|
||||
let
|
||||
mhash = ?mcodec.mhash()
|
||||
Zero = newSeq[byte](mhash.size)
|
||||
digestSize = ?mcodec.digestSize.mapFailure
|
||||
Zero = newSeq[byte](digestSize)
|
||||
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
|
||||
compress(x, y, key, mhash)
|
||||
compress(x, y, key, mcodec)
|
||||
|
||||
success CodexProof(
|
||||
compress: compressor,
|
||||
|
||||
@ -9,22 +9,58 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[bitops, atomics]
|
||||
import std/[bitops, atomics, sequtils]
|
||||
import stew/assign2
|
||||
|
||||
import pkg/questionable/results
|
||||
import pkg/taskpools
|
||||
import pkg/chronos
|
||||
import pkg/chronos/threadsync
|
||||
|
||||
import ../errors
|
||||
import ../utils/uniqueptr
|
||||
import ../utils/sharedbuf
|
||||
|
||||
export sharedbuf
|
||||
|
||||
template nodeData(
|
||||
data: openArray[byte], offsets: openArray[int], nodeSize, i, j: int
|
||||
): openArray[byte] =
|
||||
## Bytes of the j'th entry of the i'th level in the tree, starting with the
|
||||
## leaves (at level 0).
|
||||
let start = (offsets[i] + j) * nodeSize
|
||||
data.toOpenArray(start, start + nodeSize - 1)
|
||||
|
||||
type
|
||||
# TODO hash functions don't fail - removing the ?! from this function would
|
||||
# significantly simplify the flow below
|
||||
CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
|
||||
|
||||
MerkleTree*[H, K] = ref object of RootObj
|
||||
layers*: seq[seq[H]]
|
||||
compress*: CompressFn[H, K]
|
||||
zero*: H
|
||||
CompressData[H, K] = object
|
||||
fn: CompressFn[H, K]
|
||||
nodeSize: int
|
||||
zero: H
|
||||
|
||||
  MerkleTreeObj*[H, K] = object of RootObj
    store*: seq[byte]
      ## Flattened merkle tree where hashes are assumed to be trivial bytes and
      ## uniform in size.
      ##
      ## Each layer of the tree is stored serially starting with the leaves and
      ## ending with the root.
      ##
      ## Because the tree might not be balanced, `layerOffsets` contains the
      ## index of the starting point of each level, for easy lookup.
    layerOffsets*: seq[int]
      ## Starting point of each level in the tree, starting from the leaves -
      ## multiplied by the entry size, this is the offset in the payload where
      ## the entries of that level start
      ##
      ## For example, a tree with 4 leaves will have [0, 4, 6] stored here.
      ##
      ## See the nodesPerLevel function, from which this sequence is derived
    compress*: CompressData[H, K]

  MerkleTree*[H, K] = ref MerkleTreeObj[H, K]
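A worked example of this layout (a standalone sketch, not part of the diff; it re-implements the nodesPerLevel/layerOffsets helpers defined further down purely for illustration):

```nim
# Illustrative copies of the layout helpers, not the library procs themselves.
proc nodesPerLevel(nleaves: int): seq[int] =
  if nleaves <= 0: return @[]
  if nleaves == 1: return @[1, 1] # leaf and root
  var m = nleaves
  while true:
    result.add m
    if m == 1: break
    m = (m + 1) shr 1 # next layer is ceil(m/2)

proc layerOffsets(nleaves: int): seq[int] =
  var tot = 0
  for n in nodesPerLevel(nleaves):
    result.add tot
    tot += n

doAssert nodesPerLevel(4) == @[4, 2, 1]
doAssert layerOffsets(4) == @[0, 4, 6]     # matches the doc comment above
doAssert nodesPerLevel(5) == @[5, 3, 2, 1]
doAssert layerOffsets(5) == @[0, 5, 8, 10] # 11 nodes, so store.len == 11 * nodeSize
```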
|
||||
|
||||
MerkleProof*[H, K] = ref object of RootObj
|
||||
index*: int # linear index of the leaf, starting from 0
|
||||
@ -33,40 +69,99 @@ type
|
||||
compress*: CompressFn[H, K] # compress function
|
||||
zero*: H # zero value
|
||||
|
||||
MerkleTask*[H, K] = object
|
||||
tree*: ptr MerkleTree[H, K]
|
||||
leaves*: seq[H]
|
||||
signal*: ThreadSignalPtr
|
||||
layers*: UniquePtr[seq[seq[H]]]
|
||||
success*: Atomic[bool]
|
||||
func levels*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layerOffsets.len
|
||||
|
||||
func depth*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layers.len - 1
|
||||
return self.levels() - 1
|
||||
|
||||
func nodesInLayer(offsets: openArray[int], layer: int): int =
|
||||
if layer == offsets.high:
|
||||
1
|
||||
else:
|
||||
offsets[layer + 1] - offsets[layer]
|
||||
|
||||
func nodesInLayer(self: MerkleTree | MerkleTreeObj, layer: int): int =
|
||||
self.layerOffsets.nodesInLayer(layer)
|
||||
|
||||
func leavesCount*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layers[0].len
|
||||
return self.nodesInLayer(0)
|
||||
|
||||
func levels*[H, K](self: MerkleTree[H, K]): int =
|
||||
return self.layers.len
|
||||
func nodesPerLevel(nleaves: int): seq[int] =
|
||||
## Given a number of leaves, return a seq with the number of nodes at each
|
||||
## layer of the tree (from the bottom/leaves to the root)
|
||||
##
|
||||
## Ie For a tree of 4 leaves, return `[4, 2, 1]`
|
||||
if nleaves <= 0:
|
||||
return @[]
|
||||
elif nleaves == 1:
|
||||
return @[1, 1] # leaf and root
|
||||
|
||||
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] =
|
||||
return self.layers[0]
|
||||
var nodes: seq[int] = @[]
|
||||
var m = nleaves
|
||||
while true:
|
||||
nodes.add(m)
|
||||
if m == 1:
|
||||
break
|
||||
# Next layer size is ceil(m/2)
|
||||
m = (m + 1) shr 1
|
||||
|
||||
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] =
|
||||
for layer in self.layers:
|
||||
yield layer
|
||||
nodes
|
||||
|
||||
func layerOffsets(nleaves: int): seq[int] =
|
||||
## Given a number of leaves, return a seq of the starting offsets of each
|
||||
## layer in the node store that results from flattening the binary tree
|
||||
##
|
||||
## Ie For a tree of 4 leaves, return `[0, 4, 6]`
|
||||
let nodes = nodesPerLevel(nleaves)
|
||||
var tot = 0
|
||||
let offsets = nodes.mapIt:
|
||||
let cur = tot
|
||||
tot += it
|
||||
cur
|
||||
offsets
|
||||
|
||||
template nodeData(self: MerkleTreeObj, i, j: int): openArray[byte] =
|
||||
## Bytes of the j'th node of the i'th level in the tree, starting with the
|
||||
## leaves (at level 0).
|
||||
self.store.nodeData(self.layerOffsets, self.compress.nodeSize, i, j)
|
||||
|
||||
func layer*[H, K](
|
||||
self: MerkleTree[H, K], layer: int
|
||||
): seq[H] {.deprecated: "Expensive".} =
|
||||
var nodes = newSeq[H](self.nodesInLayer(layer))
|
||||
for i, h in nodes.mpairs:
|
||||
assign(h, self[].nodeData(layer, i))
|
||||
return nodes
|
||||
|
||||
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} =
|
||||
self.layer(0)
|
||||
|
||||
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] {.deprecated: "Expensive".} =
|
||||
for i in 0 ..< self.layerOffsets.len:
|
||||
yield self.layer(i)
|
||||
|
||||
proc layers*[H, K](self: MerkleTree[H, K]): seq[seq[H]] {.deprecated: "Expensive".} =
|
||||
for l in self.layers():
|
||||
result.add l
|
||||
|
||||
iterator nodes*[H, K](self: MerkleTree[H, K]): H =
|
||||
for layer in self.layers:
|
||||
for node in layer:
|
||||
## Iterate over the nodes of each layer starting with the leaves
|
||||
var node: H
|
||||
for i in 0 ..< self.layerOffsets.len:
|
||||
let nodesInLayer = self.nodesInLayer(i)
|
||||
for j in 0 ..< nodesInLayer:
|
||||
assign(node, self[].nodeData(i, j))
|
||||
yield node
|
||||
|
||||
func root*[H, K](self: MerkleTree[H, K]): ?!H =
|
||||
let last = self.layers[^1]
|
||||
if last.len != 1:
|
||||
mixin assign
|
||||
if self.layerOffsets.len == 0:
|
||||
return failure "invalid tree"
|
||||
|
||||
return success last[0]
|
||||
var h: H
|
||||
assign(h, self[].nodeData(self.layerOffsets.high(), 0))
|
||||
return success h
|
||||
|
||||
func getProof*[H, K](
|
||||
self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
|
||||
@ -82,18 +177,19 @@ func getProof*[H, K](
|
||||
var m = nleaves
|
||||
for i in 0 ..< depth:
|
||||
let j = k xor 1
|
||||
path[i] =
|
||||
if (j < m):
|
||||
self.layers[i][j]
|
||||
else:
|
||||
self.zero
|
||||
|
||||
if (j < m):
|
||||
assign(path[i], self[].nodeData(i, j))
|
||||
else:
|
||||
path[i] = self.compress.zero
|
||||
|
||||
k = k shr 1
|
||||
m = (m + 1) shr 1
|
||||
|
||||
proof.index = index
|
||||
proof.path = path
|
||||
proof.nleaves = nleaves
|
||||
proof.compress = self.compress
|
||||
proof.compress = self.compress.fn
|
||||
|
||||
success()
|
||||
|
||||
@ -132,51 +228,169 @@ func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
|
||||
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
|
||||
success bool(root == ?proof.reconstructRoot(leaf))
|
||||
|
||||
func merkleTreeWorker*[H, K](
|
||||
self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
|
||||
): ?!seq[seq[H]] =
|
||||
let a = low(xs)
|
||||
let b = high(xs)
|
||||
let m = b - a + 1
|
||||
func fromNodes*[H, K](
|
||||
self: MerkleTree[H, K],
|
||||
compressor: CompressFn,
|
||||
zero: H,
|
||||
nodes: openArray[H],
|
||||
nleaves: int,
|
||||
): ?!void =
|
||||
mixin assign
|
||||
|
||||
if nodes.len < 2: # At least leaf and root
|
||||
return failure "Not enough nodes"
|
||||
|
||||
if nleaves == 0:
|
||||
return failure "No leaves"
|
||||
|
||||
self.compress = CompressData[H, K](fn: compressor, nodeSize: nodes[0].len, zero: zero)
|
||||
self.layerOffsets = layerOffsets(nleaves)
|
||||
|
||||
if self.layerOffsets[^1] + 1 != nodes.len:
|
||||
return failure "bad node count"
|
||||
|
||||
self.store = newSeqUninit[byte](nodes.len * self.compress.nodeSize)
|
||||
|
||||
for i in 0 ..< nodes.len:
|
||||
assign(
|
||||
self[].store.toOpenArray(
|
||||
i * self.compress.nodeSize, (i + 1) * self.compress.nodeSize - 1
|
||||
),
|
||||
nodes[i],
|
||||
)
|
||||
|
||||
success()
|
||||
|
||||
func merkleTreeWorker[H, K](
|
||||
store: var openArray[byte],
|
||||
offsets: openArray[int],
|
||||
compress: CompressData[H, K],
|
||||
layer: int,
|
||||
isBottomLayer: static bool,
|
||||
): ?!void =
|
||||
## Worker used to compute the merkle tree from the leaves that are assumed to
|
||||
## already be stored at the beginning of the `store`, as done by `prepare`.
|
||||
|
||||
# Throughout, we use `assign` to convert from H to bytes and back, assuming
|
||||
# this assignment can be done somewhat efficiently (ie memcpy) - because
|
||||
# the code must work with multihash where len(H) can differ, we cannot
|
||||
# simply use a fixed-size array here.
|
||||
mixin assign
|
||||
|
||||
template nodeData(i, j: int): openArray[byte] =
|
||||
# Pick out the bytes of node j in layer i
|
||||
store.nodeData(offsets, compress.nodeSize, i, j)
|
||||
|
||||
let m = offsets.nodesInLayer(layer)
|
||||
|
||||
when not isBottomLayer:
|
||||
if m == 1:
|
||||
return success @[@xs]
|
||||
return success()
|
||||
|
||||
let halfn: int = m div 2
|
||||
let n: int = 2 * halfn
|
||||
let isOdd: bool = (n != m)
|
||||
|
||||
var ys: seq[H]
|
||||
if not isOdd:
|
||||
ys = newSeq[H](halfn)
|
||||
else:
|
||||
ys = newSeq[H](halfn + 1)
|
||||
# Because the compression function we work with works with H and not bytes,
|
||||
# we need to extract H from the raw data - a little abstraction tax that
|
||||
# ensures that properties like alignment of H are respected.
|
||||
var a, b, tmp: H
|
||||
|
||||
for i in 0 ..< halfn:
|
||||
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
|
||||
ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key)
|
||||
|
||||
assign(a, nodeData(layer, i * 2))
|
||||
assign(b, nodeData(layer, i * 2 + 1))
|
||||
|
||||
tmp = ?compress.fn(a, b, key = key)
|
||||
|
||||
assign(nodeData(layer + 1, i), tmp)
|
||||
|
||||
if isOdd:
|
||||
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
|
||||
ys[halfn] = ?self.compress(xs[n], self.zero, key = key)
|
||||
|
||||
success @[@xs] & ?self.merkleTreeWorker(ys, isBottomLayer = false)
|
||||
assign(a, nodeData(layer, n))
|
||||
|
||||
proc merkleTreeWorker*[H, K](task: ptr MerkleTask[H, K]) {.gcsafe.} =
|
||||
tmp = ?compress.fn(a, compress.zero, key = key)
|
||||
|
||||
assign(nodeData(layer + 1, halfn), tmp)
|
||||
|
||||
merkleTreeWorker(store, offsets, compress, layer + 1, false)
|
||||
|
||||
proc merkleTreeWorker[H, K](
|
||||
store: SharedBuf[byte],
|
||||
offsets: SharedBuf[int],
|
||||
compress: ptr CompressData[H, K],
|
||||
signal: ThreadSignalPtr,
|
||||
): bool =
|
||||
defer:
|
||||
discard task[].signal.fireSync()
|
||||
discard signal.fireSync()
|
||||
|
||||
let res = merkleTreeWorker(task[].tree[], task[].leaves, isBottomLayer = true)
|
||||
let res = merkleTreeWorker(
|
||||
store.toOpenArray(), offsets.toOpenArray(), compress[], 0, isBottomLayer = true
|
||||
)
|
||||
|
||||
if res.isErr:
|
||||
task[].success.store(false)
|
||||
return
|
||||
return res.isOk()
|
||||
|
||||
var layers = res.get()
|
||||
var newOuterSeq = newSeq[seq[H]](layers.len)
|
||||
for i in 0 ..< layers.len:
|
||||
var isoInner = isolate(layers[i])
|
||||
newOuterSeq[i] = extract(isoInner)
|
||||
func prepare*[H, K](
|
||||
self: MerkleTree[H, K], compressor: CompressFn, zero: H, leaves: openArray[H]
|
||||
): ?!void =
|
||||
## Prepare the instance for computing the merkle tree of the given leaves using
|
||||
## the given compression function. After preparation, `compute` should be
|
||||
## called to perform the actual computation. `leaves` will be copied into the
|
||||
## tree so they can be freed after the call.
|
||||
|
||||
task[].layers = newUniquePtr(newOuterSeq)
|
||||
task[].success.store(true)
|
||||
if leaves.len == 0:
|
||||
return failure "No leaves"
|
||||
|
||||
self.compress =
|
||||
CompressData[H, K](fn: compressor, nodeSize: leaves[0].len, zero: zero)
|
||||
self.layerOffsets = layerOffsets(leaves.len)
|
||||
|
||||
self.store = newSeqUninit[byte]((self.layerOffsets[^1] + 1) * self.compress.nodeSize)
|
||||
|
||||
for j in 0 ..< leaves.len:
|
||||
assign(self[].nodeData(0, j), leaves[j])
|
||||
|
||||
return success()
|
||||
|
||||
proc compute*[H, K](self: MerkleTree[H, K]): ?!void =
|
||||
merkleTreeWorker(
|
||||
self.store, self.layerOffsets, self.compress, 0, isBottomLayer = true
|
||||
)
|
||||
|
||||
proc compute*[H, K](
|
||||
self: MerkleTree[H, K], tp: Taskpool
|
||||
): Future[?!void] {.async: (raises: []).} =
|
||||
if tp.numThreads == 1:
|
||||
# With a single thread, there's no point creating a separate task
|
||||
return self.compute()
|
||||
|
||||
# TODO this signal would benefit from reuse across computations
|
||||
without signal =? ThreadSignalPtr.new():
|
||||
return failure("Unable to create thread signal")
|
||||
|
||||
defer:
|
||||
signal.close().expect("closing once works")
|
||||
|
||||
let res = tp.spawn merkleTreeWorker(
|
||||
SharedBuf.view(self.store),
|
||||
SharedBuf.view(self.layerOffsets),
|
||||
addr self.compress,
|
||||
signal,
|
||||
)
|
||||
|
||||
# To support cancellation, we'd have to ensure the task we posted to taskpools
|
||||
# exits early - since we're not doing that, block cancellation attempts
|
||||
try:
|
||||
await noCancel signal.wait()
|
||||
except AsyncError as exc:
|
||||
# Since we initialized the signal, the OS or chronos is misbehaving. In any
|
||||
# case, it would mean the task is still running, which would cause a memory
# violation if we let it run - panic instead
|
||||
raiseAssert "Could not wait for signal, was it initialized? " & exc.msg
|
||||
|
||||
if not res.sync():
|
||||
return failure("merkle tree task failed")
|
||||
|
||||
return success()
|
||||
|
||||
@ -19,7 +19,6 @@ import pkg/constantine/platforms/abstractions
|
||||
import pkg/questionable/results
|
||||
|
||||
import ../utils
|
||||
import ../utils/uniqueptr
|
||||
import ../rng
|
||||
|
||||
import ./merkletree
|
||||
@ -47,7 +46,16 @@ type
|
||||
Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
|
||||
Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]
|
||||
|
||||
Poseidon2TreeTask* = MerkleTask[Poseidon2Hash, PoseidonKeysEnum]
|
||||
proc len*(v: Poseidon2Hash): int =
|
||||
sizeof(v)
|
||||
|
||||
proc assign*(v: var openArray[byte], h: Poseidon2Hash) =
|
||||
doAssert v.len == sizeof(h)
|
||||
copyMem(addr v[0], addr h, sizeof(h))
|
||||
|
||||
proc assign*(h: var Poseidon2Hash, v: openArray[byte]) =
|
||||
doAssert v.len == sizeof(h)
|
||||
copyMem(addr h, addr v[0], sizeof(h))
|
||||
|
||||
proc `$`*(self: Poseidon2Tree): string =
|
||||
let root = if self.root.isOk: self.root.get.toHex else: "none"
|
||||
@ -68,7 +76,7 @@ converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
|
||||
of KeyOdd: KeyOddF
|
||||
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
|
||||
|
||||
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
|
||||
proc initTree(leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
|
||||
@ -77,51 +85,24 @@ func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2
|
||||
): ?!Poseidon2Hash {.noSideEffect.} =
|
||||
success compress(x, y, key.toKey)
|
||||
|
||||
var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
|
||||
var self = Poseidon2Tree()
|
||||
?self.prepare(compressor, Poseidon2Zero, leaves)
|
||||
success self
|
||||
|
||||
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
|
||||
let self = ?initTree(leaves)
|
||||
?self.compute()
|
||||
|
||||
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
|
||||
success self
|
||||
|
||||
proc init*(
|
||||
_: type Poseidon2Tree, tp: Taskpool, leaves: seq[Poseidon2Hash]
|
||||
): Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} =
|
||||
if leaves.len == 0:
|
||||
return failure "Empty leaves"
|
||||
let self = ?initTree(leaves)
|
||||
|
||||
let compressor = proc(
|
||||
x, y: Poseidon2Hash, key: PoseidonKeysEnum
|
||||
): ?!Poseidon2Hash {.noSideEffect.} =
|
||||
success compress(x, y, key.toKey)
|
||||
?await self.compute(tp)
|
||||
|
||||
without signal =? ThreadSignalPtr.new():
|
||||
return failure("Unable to create thread signal")
|
||||
|
||||
defer:
|
||||
signal.close().expect("closing once works")
|
||||
|
||||
var tree = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
|
||||
var task = Poseidon2TreeTask(
|
||||
tree: cast[ptr Poseidon2Tree](addr tree), leaves: leaves, signal: signal
|
||||
)
|
||||
|
||||
doAssert tp.numThreads > 1,
|
||||
"Must have at least one separate thread or signal will never be fired"
|
||||
|
||||
tp.spawn merkleTreeWorker(addr task)
|
||||
|
||||
let threadFut = signal.wait()
|
||||
|
||||
if err =? catch(await threadFut.join()).errorOption:
|
||||
?catch(await noCancel threadFut)
|
||||
if err of CancelledError:
|
||||
raise (ref CancelledError) err
|
||||
|
||||
if not task.success.load():
|
||||
return failure("merkle tree task failed")
|
||||
|
||||
tree.layers = extractValue(task.layers)
|
||||
|
||||
success tree
|
||||
success self
|
||||
|
||||
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
|
||||
Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))
|
||||
@ -134,23 +115,13 @@ proc init*(
|
||||
proc fromNodes*(
|
||||
_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
|
||||
): ?!Poseidon2Tree =
|
||||
if nodes.len == 0:
|
||||
return failure "Empty nodes"
|
||||
|
||||
let compressor = proc(
|
||||
x, y: Poseidon2Hash, key: PoseidonKeysEnum
|
||||
): ?!Poseidon2Hash {.noSideEffect.} =
|
||||
success compress(x, y, key.toKey)
|
||||
|
||||
var
|
||||
self = Poseidon2Tree(compress: compressor, zero: zero)
|
||||
layer = nleaves
|
||||
pos = 0
|
||||
|
||||
while pos < nodes.len:
|
||||
self.layers.add(nodes[pos ..< (pos + layer)])
|
||||
pos += layer
|
||||
layer = divUp(layer, 2)
|
||||
let self = Poseidon2Tree()
|
||||
?self.fromNodes(compressor, Poseidon2Zero, nodes, nleaves)
|
||||
|
||||
let
|
||||
index = Rng.instance.rand(nleaves - 1)
|
||||
|
||||
11
codex/multicodec_exts.nim
Normal file
@ -0,0 +1,11 @@
const CodecExts = [
  ("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
  ("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
  ("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress
  ("codex-manifest", 0xCD01),
  ("codex-block", 0xCD02),
  ("codex-root", 0xCD03),
  ("codex-slot-root", 0xCD04),
  ("codex-proving-root", 0xCD05),
  ("codex-slot-cell", 0xCD06),
]
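These names become resolvable through libp2p's multicodec table once the `libp2p_multicodec_exts` define (see the config.nims hunk later in this diff) points at this file. A minimal sketch, assuming that define is active:

```nim
# Sketch only: compiles when the CodecExts table above has been registered via
# the libp2p_multicodec_exts define; the codec then behaves like a built-in one.
import pkg/libp2p/multicodec

let manifestCodec = multiCodec("codex-manifest") # maps to 0xCD01
echo manifestCodec
```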
40
codex/multihash_exts.nim
Normal file
@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2

proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
  # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
  # See: https://github.com/codex-storage/nim-codex/issues/1162
  if len(output) > 0:
    let digest = hashes.sha256.hash(data)
    copyMem(addr output[0], addr digest[0], 32)

proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.Sponge.digest(data).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

const Sha2256MultiHash* = MHash(
  mcodec: multiCodec("sha2-256"),
  size: sha256.sizeDigest,
  coder: sha2_256hash_constantine,
)
const HashExts = [
  # override sha2-256 hash function
  Sha2256MultiHash,
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
    size: 32,
    coder: poseidon2_sponge_rate2,
  ),
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
    size: 32,
    coder: poseidon2_merkle_2kb_sponge,
  ),
]
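With these extensions registered (via the `libp2p_multihash_exts` define in config.nims below), the overridden coders are reached through the ordinary multihash API. A minimal sketch, assuming the define is active:

```nim
import pkg/libp2p/multihash

let data = @[byte 1, 2, 3]
# routed through sha2_256hash_constantine above rather than libp2p's default coder
let mh = MultiHash.digest("sha2-256", data).expect("known codec")
echo mh.size # 32
```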
@ -10,10 +10,10 @@
|
||||
|
||||
import
|
||||
std/[options, os, strutils, times, net, atomics],
|
||||
stew/shims/net as stewNet,
|
||||
stew/[objects, results],
|
||||
stew/[objects],
|
||||
nat_traversal/[miniupnpc, natpmp],
|
||||
json_serialization/std/net
|
||||
json_serialization/std/net,
|
||||
results
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/chronicles
|
||||
|
||||
@ -44,7 +44,7 @@ import ./indexingstrategy
|
||||
import ./utils
|
||||
import ./errors
|
||||
import ./logutils
|
||||
import ./utils/asynciter
|
||||
import ./utils/safeasynciter
|
||||
import ./utils/trackedfutures
|
||||
|
||||
export logutils
|
||||
@ -52,7 +52,10 @@ export logutils
logScope:
  topics = "codex node"

const DefaultFetchBatch = 10
const
  DefaultFetchBatch = 1024
  MaxOnBatchBlocks = 128
  BatchRefillThreshold = 0.75 # Refill when 75% of window completes

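A quick worked example of what these constants imply for the sliding-window fetch further down (standalone arithmetic mirroring the expressions in fetchBatched):

```nim
const
  DefaultFetchBatch = 1024
  MaxOnBatchBlocks = 128
  BatchRefillThreshold = 0.75

let
  batchSize = DefaultFetchBatch
  refillThreshold = int(float(batchSize) * BatchRefillThreshold)
  refillSize = max(refillThreshold, 1)
  maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)

doAssert refillThreshold == 768   # refill once 768 of the 1024 in-flight blocks complete
doAssert refillSize == 768
doAssert maxCallbackBlocks == 128 # onBatch receives at most 128 blocks per call
```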
type
|
||||
Contracts* =
|
||||
@ -78,9 +81,9 @@ type
|
||||
CodexNodeRef* = ref CodexNode
|
||||
|
||||
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
|
||||
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.
|
||||
gcsafe, async: (raises: [CancelledError])
|
||||
.}
|
||||
BatchProc* =
|
||||
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
|
||||
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
|
||||
|
||||
func switch*(self: CodexNodeRef): Switch =
|
||||
return self.switch
|
||||
@ -186,34 +189,62 @@ proc fetchBatched*(
|
||||
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
|
||||
# )
|
||||
|
||||
while not iter.finished:
|
||||
let blockFutures = collect:
|
||||
for i in 0 ..< batchSize:
|
||||
if not iter.finished:
|
||||
let address = BlockAddress.init(cid, iter.next())
|
||||
if not (await address in self.networkStore) or fetchLocal:
|
||||
self.networkStore.getBlock(address)
|
||||
# Sliding window: maintain batchSize blocks in-flight
|
||||
let
|
||||
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
|
||||
refillSize = max(refillThreshold, 1)
|
||||
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
|
||||
|
||||
if blockFutures.len == 0:
|
||||
var
|
||||
blockData: seq[bt.Block]
|
||||
failedBlocks = 0
|
||||
successfulBlocks = 0
|
||||
completedInWindow = 0
|
||||
|
||||
var addresses = newSeqOfCap[BlockAddress](batchSize)
|
||||
for i in 0 ..< batchSize:
|
||||
if not iter.finished:
|
||||
let address = BlockAddress.init(cid, iter.next())
|
||||
if fetchLocal or not (await address in self.networkStore):
|
||||
addresses.add(address)
|
||||
|
||||
var blockResults = await self.networkStore.getBlocks(addresses)
|
||||
|
||||
while not blockResults.finished:
|
||||
without blk =? await blockResults.next(), err:
|
||||
inc(failedBlocks)
|
||||
continue
|
||||
|
||||
without blockResults =? await allFinishedValues[?!bt.Block](blockFutures), err:
|
||||
trace "Some blocks failed to fetch", err = err.msg
|
||||
return failure(err)
|
||||
inc(successfulBlocks)
|
||||
inc(completedInWindow)
|
||||
|
||||
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value)
|
||||
if not onBatch.isNil:
|
||||
blockData.add(blk)
|
||||
if blockData.len >= maxCallbackBlocks:
|
||||
if batchErr =? (await onBatch(blockData)).errorOption:
|
||||
return failure(batchErr)
|
||||
blockData = @[]
|
||||
|
||||
let numOfFailedBlocks = blockResults.len - blocks.len
|
||||
if numOfFailedBlocks > 0:
|
||||
return
|
||||
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")")
|
||||
if completedInWindow >= refillThreshold and not iter.finished:
|
||||
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
|
||||
for i in 0 ..< refillSize:
|
||||
if not iter.finished:
|
||||
let address = BlockAddress.init(cid, iter.next())
|
||||
if fetchLocal or not (await address in self.networkStore):
|
||||
refillAddresses.add(address)
|
||||
|
||||
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption:
|
||||
if refillAddresses.len > 0:
|
||||
blockResults =
|
||||
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
|
||||
completedInWindow = 0
|
||||
|
||||
if failedBlocks > 0:
|
||||
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
|
||||
|
||||
if not onBatch.isNil and blockData.len > 0:
|
||||
if batchErr =? (await onBatch(blockData)).errorOption:
|
||||
return failure(batchErr)
|
||||
|
||||
if not iter.finished:
|
||||
await sleepAsync(1.millis)
|
||||
|
||||
success()
|
||||
|
||||
proc fetchBatched*(
|
||||
@ -403,6 +434,7 @@ proc store*(
|
||||
filename: ?string = string.none,
|
||||
mimetype: ?string = string.none,
|
||||
blockSize = DefaultBlockSize,
|
||||
onBlockStored: OnBlockStoredProc = nil,
|
||||
): Future[?!Cid] {.async.} =
|
||||
## Save stream contents as dataset with given blockSize
|
||||
## to nodes's BlockStore, and return Cid of its manifest
|
||||
@ -432,6 +464,9 @@ proc store*(
|
||||
if err =? (await self.networkStore.putBlock(blk)).errorOption:
|
||||
error "Unable to store block", cid = blk.cid, err = err.msg
|
||||
return failure(&"Unable to store block {blk.cid}")
|
||||
|
||||
if not onBlockStored.isNil:
|
||||
onBlockStored(chunk)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
@ -874,6 +909,7 @@ proc stop*(self: CodexNodeRef) {.async.} =
|
||||
if not self.clock.isNil:
|
||||
await self.clock.stop()
|
||||
|
||||
proc close*(self: CodexNodeRef) {.async.} =
|
||||
if not self.networkStore.isNil:
|
||||
await self.networkStore.close
|
||||
|
||||
@ -900,3 +936,10 @@ proc new*(
|
||||
contracts: contracts,
|
||||
trackedFutures: TrackedFutures(),
|
||||
)
|
||||
|
||||
proc hasLocalBlock*(
|
||||
self: CodexNodeRef, cid: Cid
|
||||
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||
## Returns true if the given Cid is present in the local store
|
||||
|
||||
return await (cid in self.networkStore.localStore)
|
||||
|
||||
@ -7,10 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/sequtils
|
||||
import std/mimetypes
|
||||
@ -365,6 +362,22 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
|
||||
let json = %formatManifest(cid.get(), manifest)
|
||||
return RestApiResponse.response($json, contentType = "application/json")
|
||||
|
||||
router.api(MethodGet, "/api/codex/v1/data/{cid}/exists") do(
|
||||
cid: Cid, resp: HttpResponseRef
|
||||
) -> RestApiResponse:
|
||||
## Only check whether the given CID is available in the local store
|
||||
##
|
||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||
|
||||
if cid.isErr:
|
||||
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
|
||||
|
||||
let cid = cid.get()
|
||||
let hasCid = await node.hasLocalBlock(cid)
|
||||
|
||||
let json = %*{$cid: hasCid}
|
||||
return RestApiResponse.response($json, contentType = "application/json")
|
||||
|
||||
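The handler replies with a single-key JSON object mapping the CID to a boolean. Illustrative sketch of the shape, using the same `%*` construction as above (the CID string is hypothetical):

```nim
import std/json

let cidStr = "zDvZRwzm..." # hypothetical CID string
let body = %*{cidStr: true}
echo $body # {"zDvZRwzm...":true}
```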
router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse:
|
||||
let json =
|
||||
%RestRepoStore(
|
||||
|
||||
@ -7,10 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/libp2p/crypto/crypto
|
||||
import pkg/bearssl/rand
|
||||
|
||||
@ -27,9 +27,7 @@
|
||||
## | UInt256 | totalRemainingCollateral | |
|
||||
## +---------------------------------------------------+
|
||||
|
||||
import pkg/upraises
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/sequtils
|
||||
import std/sugar
|
||||
@ -38,7 +36,6 @@ import std/sequtils
|
||||
import std/times
|
||||
import pkg/chronos
|
||||
import pkg/datastore
|
||||
import pkg/nimcrypto
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/stint
|
||||
@ -55,6 +52,8 @@ import ../units
|
||||
export requests
|
||||
export logutils
|
||||
|
||||
from nimcrypto import randomBytes
|
||||
|
||||
logScope:
|
||||
topics = "marketplace sales reservations"
|
||||
|
||||
@ -92,14 +91,10 @@ type
|
||||
repo: RepoStore
|
||||
OnAvailabilitySaved: ?OnAvailabilitySaved
|
||||
|
||||
GetNext* = proc(): Future[?seq[byte]] {.
|
||||
upraises: [], gcsafe, async: (raises: [CancelledError]), closure
|
||||
.}
|
||||
IterDispose* =
|
||||
proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
|
||||
OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
|
||||
upraises: [], gcsafe, async: (raises: [])
|
||||
.}
|
||||
GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
|
||||
IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
|
||||
OnAvailabilitySaved* =
|
||||
proc(availability: Availability): Future[void] {.async: (raises: []).}
|
||||
StorableIter* = ref object
|
||||
finished*: bool
|
||||
next*: GetNext
|
||||
|
||||
@ -2,7 +2,6 @@ import pkg/chronos
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/stint
|
||||
import pkg/upraises
|
||||
import ../contracts/requests
|
||||
import ../errors
|
||||
import ../logutils
|
||||
@ -113,14 +112,12 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
|
||||
|
||||
method onFulfilled*(
|
||||
agent: SalesAgent, requestId: RequestId
|
||||
) {.base, gcsafe, upraises: [].} =
|
||||
) {.base, gcsafe, raises: [].} =
|
||||
let cancelled = agent.data.cancelled
|
||||
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
|
||||
cancelled.cancelSoon()
|
||||
|
||||
method onFailed*(
|
||||
agent: SalesAgent, requestId: RequestId
|
||||
) {.base, gcsafe, upraises: [].} =
|
||||
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
|
||||
without request =? agent.data.request:
|
||||
return
|
||||
if agent.data.requestId == requestId:
|
||||
@ -128,7 +125,7 @@ method onFailed*(
|
||||
|
||||
method onSlotFilled*(
|
||||
agent: SalesAgent, requestId: RequestId, slotIndex: uint64
|
||||
) {.base, gcsafe, upraises: [].} =
|
||||
) {.base, gcsafe, raises: [].} =
|
||||
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
|
||||
agent.schedule(slotFilledEvent(requestId, slotIndex))
|
||||
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/upraises
|
||||
import pkg/libp2p/cid
|
||||
|
||||
import ../market
|
||||
@ -24,21 +23,20 @@ type
|
||||
slotQueue*: SlotQueue
|
||||
simulateProofFailures*: int
|
||||
|
||||
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.
|
||||
gcsafe, async: (raises: [CancelledError])
|
||||
.}
|
||||
BlocksCb* =
|
||||
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
|
||||
OnStore* = proc(
|
||||
request: StorageRequest,
|
||||
expiry: SecondsSince1970,
|
||||
slot: uint64,
|
||||
blocksCb: BlocksCb,
|
||||
isRepairing: bool,
|
||||
): Future[?!void] {.gcsafe, async: (raises: [CancelledError]).}
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).}
|
||||
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
|
||||
gcsafe, async: (raises: [CancelledError])
|
||||
async: (raises: [CancelledError])
|
||||
.}
|
||||
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
|
||||
gcsafe, async: (raises: [CancelledError])
|
||||
async: (raises: [CancelledError])
|
||||
.}
|
||||
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
|
||||
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
|
||||
|
||||
@ -15,8 +15,7 @@ logScope:
|
||||
topics = "marketplace slotqueue"
|
||||
|
||||
type
|
||||
OnProcessSlot* =
|
||||
proc(item: SlotQueueItem): Future[void] {.gcsafe, async: (raises: []).}
|
||||
OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}
|
||||
|
||||
# Non-ref obj copies value when assigned, preventing accidental modification
|
||||
# of values which could cause an incorrect order (eg
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
import pkg/questionable
|
||||
import pkg/upraises
|
||||
import ../errors
|
||||
import ../utils/asyncstatemachine
|
||||
import ../market
|
||||
@ -16,17 +15,17 @@ type
|
||||
|
||||
method onCancelled*(
|
||||
state: SaleState, request: StorageRequest
|
||||
): ?State {.base, upraises: [].} =
|
||||
): ?State {.base, raises: [].} =
|
||||
discard
|
||||
|
||||
method onFailed*(
|
||||
state: SaleState, request: StorageRequest
|
||||
): ?State {.base, upraises: [].} =
|
||||
): ?State {.base, raises: [].} =
|
||||
discard
|
||||
|
||||
method onSlotFilled*(
|
||||
state: SaleState, requestId: RequestId, slotIndex: uint64
|
||||
): ?State {.base, upraises: [].} =
|
||||
): ?State {.base, raises: [].} =
|
||||
discard
|
||||
|
||||
proc cancelledEvent*(request: StorageRequest): Event =
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/upraises
|
||||
|
||||
import ../statemachine
|
||||
import ../salesagent
|
||||
|
||||
@ -29,7 +29,7 @@ type
|
||||
Block
|
||||
Both
|
||||
|
||||
CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, async: (raises: []).}
|
||||
CidCallback* = proc(cid: Cid): Future[void] {.async: (raises: []).}
|
||||
BlockStore* = ref object of RootObj
|
||||
onBlockStored*: ?CidCallback
|
||||
|
||||
@ -70,6 +70,14 @@ method completeBlock*(
|
||||
) {.base, gcsafe.} =
|
||||
discard
|
||||
|
||||
method getBlocks*(
|
||||
self: BlockStore, addresses: seq[BlockAddress]
|
||||
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
|
||||
## Gets a set of blocks from the blockstore. Blocks might
|
||||
## be returned in any order.
|
||||
|
||||
raiseAssert("getBlocks not implemented!")
|
||||
|
||||
method getBlockAndProof*(
|
||||
self: BlockStore, treeCid: Cid, index: Natural
|
||||
): Future[?!(Block, CodexProof)] {.base, async: (raises: [CancelledError]), gcsafe.} =
|
||||
|
||||
@ -66,6 +66,21 @@ method getBlock*(
|
||||
trace "Error requesting block from cache", cid, error = exc.msg
|
||||
return failure exc
|
||||
|
||||
method getBlocks*(
|
||||
self: CacheStore, addresses: seq[BlockAddress]
|
||||
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
|
||||
var i = 0
|
||||
|
||||
proc isFinished(): bool =
|
||||
i == addresses.len
|
||||
|
||||
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
|
||||
let value = await self.getBlock(addresses[i])
|
||||
inc(i)
|
||||
return value
|
||||
|
||||
return SafeAsyncIter[Block].new(genNext, isFinished)
|
||||
|
||||
method getCidAndProof*(
|
||||
self: CacheStore, treeCid: Cid, index: Natural
|
||||
): Future[?!(Cid, CodexProof)] {.async: (raises: [CancelledError]).} =
|
||||
|
||||
@ -7,9 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/sugar
|
||||
import pkg/questionable/results
|
||||
|
||||
@ -31,6 +31,31 @@ type NetworkStore* = ref object of BlockStore
|
||||
engine*: BlockExcEngine # blockexc decision engine
|
||||
localStore*: BlockStore # local block store
|
||||
|
||||
method getBlocks*(
|
||||
self: NetworkStore, addresses: seq[BlockAddress]
|
||||
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
|
||||
var
|
||||
localAddresses: seq[BlockAddress]
|
||||
remoteAddresses: seq[BlockAddress]
|
||||
|
||||
let runtimeQuota = 10.milliseconds
|
||||
var lastIdle = Moment.now()
|
||||
|
||||
for address in addresses:
|
||||
if not (await address in self.localStore):
|
||||
remoteAddresses.add(address)
|
||||
else:
|
||||
localAddresses.add(address)
|
||||
|
||||
if (Moment.now() - lastIdle) >= runtimeQuota:
|
||||
await idleAsync()
|
||||
lastIdle = Moment.now()
|
||||
|
||||
return chain(
|
||||
await self.localStore.getBlocks(localAddresses),
|
||||
self.engine.requestBlocks(remoteAddresses),
|
||||
)
|
||||
|
||||
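The runtimeQuota/idleAsync dance above is the usual chronos pattern for keeping a long synchronous loop cooperative. In isolation (a sketch, not library code):

```nim
import pkg/chronos

proc processMany(items: seq[int]) {.async.} =
  const runtimeQuota = 10.milliseconds
  var lastIdle = Moment.now()
  for it in items:
    discard it # some cheap synchronous work per item
    if (Moment.now() - lastIdle) >= runtimeQuota:
      await idleAsync() # yield so other futures can make progress
      lastIdle = Moment.now()
```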
method getBlock*(
|
||||
self: NetworkStore, address: BlockAddress
|
||||
): Future[?!Block] {.async: (raises: [CancelledError]).} =
|
||||
|
||||
@ -38,6 +38,21 @@ logScope:
|
||||
# BlockStore API
|
||||
###########################################################
|
||||
|
||||
method getBlocks*(
|
||||
self: RepoStore, addresses: seq[BlockAddress]
|
||||
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
|
||||
var i = 0
|
||||
|
||||
proc isFinished(): bool =
|
||||
i == addresses.len
|
||||
|
||||
proc genNext(): Future[?!Block] {.async: (raises: [CancelledError]).} =
|
||||
let value = await self.getBlock(addresses[i])
|
||||
inc(i)
|
||||
return value
|
||||
|
||||
return SafeAsyncIter[Block].new(genNext, isFinished)
|
||||
|
||||
method getBlock*(
|
||||
self: RepoStore, cid: Cid
|
||||
): Future[?!Block] {.async: (raises: [CancelledError]).} =
|
||||
@ -428,7 +443,6 @@ proc start*(
|
||||
): Future[void] {.async: (raises: [CancelledError, CodexError]).} =
|
||||
## Start repo
|
||||
##
|
||||
|
||||
if self.started:
|
||||
trace "Repo already started"
|
||||
return
|
||||
@ -450,6 +464,5 @@ proc stop*(self: RepoStore): Future[void] {.async: (raises: []).} =
|
||||
return
|
||||
|
||||
trace "Stopping repo"
|
||||
await self.close()
|
||||
|
||||
self.started = false
|
||||
|
||||
@ -7,10 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/sugar
|
||||
import pkg/chronos
|
||||
|
||||
@ -7,9 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/libp2p
|
||||
|
||||
@ -9,10 +9,7 @@
|
||||
|
||||
import std/options
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/chronos
|
||||
import pkg/stew/ptrops
|
||||
|
||||
@ -1,9 +1,8 @@
|
||||
import std/times
|
||||
import pkg/upraises
|
||||
import ./clock
|
||||
|
||||
type SystemClock* = ref object of Clock
|
||||
|
||||
method now*(clock: SystemClock): SecondsSince1970 {.upraises: [].} =
|
||||
method now*(clock: SystemClock): SecondsSince1970 {.raises: [].} =
|
||||
let now = times.now().utc
|
||||
now.toTime().toUnix()
|
||||
|
||||
@ -7,15 +7,13 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/net
|
||||
import std/strutils
|
||||
import std/options
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/stew/shims/net
|
||||
import pkg/stew/endians2
|
||||
|
||||
func remapAddr*(
|
||||
|
||||
@ -9,10 +9,7 @@
|
||||
|
||||
## Partially taken from nim beacon chain
|
||||
|
||||
import pkg/upraises
|
||||
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import std/strutils
|
||||
import pkg/stew/io2
|
||||
|
||||
@ -7,9 +7,7 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import pkg/upraises
|
||||
push:
|
||||
{.upraises: [].}
|
||||
{.push raises: [], gcsafe.}
|
||||
|
||||
import pkg/questionable/results
|
||||
import pkg/libp2p/crypto/crypto
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[tables, hashes], pkg/results, pkg/stew/shims/net as stewNet, chronos, chronicles
|
||||
import std/[net, tables, hashes], pkg/results, chronos, chronicles
|
||||
|
||||
import pkg/libp2p
|
||||
|
||||
|
||||
@ -232,3 +232,28 @@ proc empty*[T](_: type SafeAsyncIter[T]): SafeAsyncIter[T] =
    true

  SafeAsyncIter[T].new(genNext, isFinished)

proc chain*[T](iters: seq[SafeAsyncIter[T]]): SafeAsyncIter[T] =
  if iters.len == 0:
    return SafeAsyncIter[T].empty

  var curIdx = 0

  proc ensureNext(): void =
    while curIdx < iters.len and iters[curIdx].finished:
      inc(curIdx)

  proc isFinished(): bool =
    curIdx == iters.len

  proc genNext(): Future[?!T] {.async: (raises: [CancelledError]).} =
    let item = await iters[curIdx].next()
    ensureNext()
    return item

  ensureNext()

  return SafeAsyncIter[T].new(genNext, isFinished)

proc chain*[T](iters: varargs[SafeAsyncIter[T]]): SafeAsyncIter[T] =
  chain(iters.toSeq)

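A standalone sketch of how these iterators are built and chained, as the getBlocks implementations above do (int values instead of blocks for brevity; `countTo` is illustrative, not library code, and the import path is an assumption about the repository layout):

```nim
import pkg/chronos
import pkg/questionable/results
import codex/utils/safeasynciter # assumed path within this repository

proc countTo(n: int): SafeAsyncIter[int] =
  var i = 0
  proc isFinished(): bool =
    i == n
  proc genNext(): Future[?!int] {.async: (raises: [CancelledError]).} =
    let v = i
    inc(i)
    return success(v)
  SafeAsyncIter[int].new(genNext, isFinished)

proc demo() {.async.} =
  let combined = chain(countTo(3), countTo(2))
  while not combined.finished:
    let item = await combined.next()
    if item.isOk:
      echo item.get() # prints 0, 1, 2, 0, 1

waitFor demo()
```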
24
codex/utils/sharedbuf.nim
Normal file
@ -0,0 +1,24 @@
import stew/ptrops

type SharedBuf*[T] = object
  payload*: ptr UncheckedArray[T]
  len*: int

proc view*[T](_: type SharedBuf, v: openArray[T]): SharedBuf[T] =
  if v.len > 0:
    SharedBuf[T](payload: makeUncheckedArray(addr v[0]), len: v.len)
  else:
    default(SharedBuf[T])

template checkIdx(v: SharedBuf, i: int) =
  doAssert i >= 0 and i < v.len

proc `[]`*[T](v: SharedBuf[T], i: int): var T =
  v.checkIdx(i)
  v.payload[i]

template toOpenArray*[T](v: SharedBuf[T]): var openArray[T] =
  v.payload.toOpenArray(0, v.len - 1)

template toOpenArray*[T](v: SharedBuf[T], s, e: int): var openArray[T] =
  v.toOpenArray().toOpenArray(s, e)
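A small usage sketch for SharedBuf (assumption: the viewed buffer outlives the view, which is what the taskpool-based merkle compute above relies on):

```nim
import codex/utils/sharedbuf # assumed path within this repository

var store = newSeq[byte](64)
let buf = SharedBuf.view(store)

doAssert buf.len == 64
buf.toOpenArray(0, 31)[0] = 0xff'u8 # writes through to `store`
doAssert store[0] == 0xff'u8
```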
@ -17,7 +17,7 @@ import pkg/chronos
|
||||
import ../logutils
|
||||
|
||||
type
|
||||
TimerCallback* = proc(): Future[void] {.gcsafe, async: (raises: []).}
|
||||
TimerCallback* = proc(): Future[void] {.async: (raises: []).}
|
||||
Timer* = ref object of RootObj
|
||||
callback: TimerCallback
|
||||
interval: Duration
|
||||
|
||||
@ -80,7 +80,7 @@ proc removeSlotsThatHaveEnded(validation: Validation) {.async.} =
|
||||
|
||||
proc markProofAsMissing(
|
||||
validation: Validation, slotId: SlotId, period: Period
|
||||
) {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
logScope:
|
||||
currentPeriod = validation.getCurrentPeriod()
|
||||
|
||||
@ -91,18 +91,18 @@ proc markProofAsMissing(
|
||||
else:
|
||||
let inDowntime {.used.} = await validation.market.inDowntime(slotId)
|
||||
trace "Proof not missing", checkedPeriod = period, inDowntime
|
||||
except CancelledError:
|
||||
raise
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
error "Marking proof as missing failed", msg = e.msg
|
||||
|
||||
proc markProofsAsMissing(validation: Validation) {.async.} =
|
||||
proc markProofsAsMissing(validation: Validation) {.async: (raises: [CancelledError]).} =
|
||||
let slots = validation.slots
|
||||
for slotId in slots:
|
||||
let previousPeriod = validation.getCurrentPeriod() - 1
|
||||
await validation.markProofAsMissing(slotId, previousPeriod)
|
||||
|
||||
proc run(validation: Validation) {.async: (raises: []).} =
|
||||
proc run(validation: Validation) {.async: (raises: [CancelledError]).} =
|
||||
trace "Validation started"
|
||||
try:
|
||||
while true:
|
||||
|
||||
11
config.nims
@ -61,11 +61,12 @@ elif defined(macosx) and defined(arm64):
  switch("passC", "-mcpu=apple-a14")
  # TODO: newer Clang >=15.0 can: https://github.com/llvm/llvm-project/commit/fcca10c69aaab539962d10fcc59a5f074b73b0de
else:
  switch("passC", "-march=native")
  if defined(windows):
    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782
    # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes)
    switch("passC", "-mno-avx512vl")
    switch("passC", "-march=x86-64")
  else: switch("passC", "-march=native")

--tlsEmulation:
  off
@ -91,6 +92,12 @@ else:
  on
--warningAsError:
  "ProveField:on"
--define:
  "libp2p_multicodec_exts:../../../codex/multicodec_exts.nim"
--define:
  "libp2p_multihash_exts:../../../codex/multihash_exts.nim"
--define:
  "libp2p_contentids_exts:../../../codex/contentids_exts.nim"

when (NimMajor, NimMinor) >= (1, 4):
  --warning:

24
examples/golang/README.md
Normal file
@ -0,0 +1,24 @@

## Prerequisites

libcodex.so must be compiled and present in the build folder.

## Compilation

From the codex root folder:

```code
go build -o codex-go examples/golang/codex.go
```

## Run

From the codex root folder:

```code
export LD_LIBRARY_PATH=build
```

```code
./codex-go
```
885
examples/golang/codex.go
Normal file
@ -0,0 +1,885 @@
|
||||
package main
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -L../../build/ -lcodex
|
||||
#cgo LDFLAGS: -L../../ -Wl,-rpath,../../
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <stdlib.h>
|
||||
#include "../../library/libcodex.h"
|
||||
|
||||
typedef struct {
|
||||
int ret;
|
||||
char* msg;
|
||||
size_t len;
|
||||
uintptr_t h;
|
||||
} Resp;
|
||||
|
||||
static void* allocResp(uintptr_t h) {
|
||||
Resp* r = (Resp*)calloc(1, sizeof(Resp));
|
||||
r->h = h;
|
||||
return r;
|
||||
}
|
||||
|
||||
static void freeResp(void* resp) {
|
||||
if (resp != NULL) {
|
||||
free(resp);
|
||||
}
|
||||
}
|
||||
|
||||
static int getRet(void* resp) {
|
||||
if (resp == NULL) {
|
||||
return 0;
|
||||
}
|
||||
Resp* m = (Resp*) resp;
|
||||
return m->ret;
|
||||
}
|
||||
|
||||
void libcodexNimMain(void);
|
||||
|
||||
static void codex_host_init_once(void){
|
||||
static int done;
|
||||
if (!__atomic_exchange_n(&done, 1, __ATOMIC_SEQ_CST)) libcodexNimMain();
|
||||
}
|
||||
|
||||
// resp must be non-NULL if the caller is interested in retrieving data from the callback
|
||||
void callback(int ret, char* msg, size_t len, void* resp);
|
||||
|
||||
static void* cGoCodexNew(const char* configJson, void* resp) {
|
||||
void* ret = codex_new(configJson, (CodexCallback) callback, resp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cGoCodexStart(void* codexCtx, void* resp) {
|
||||
return codex_start(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexStop(void* codexCtx, void* resp) {
|
||||
return codex_stop(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexClose(void* codexCtx, void* resp) {
|
||||
return codex_close(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexDestroy(void* codexCtx, void* resp) {
|
||||
return codex_destroy(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexVersion(void* codexCtx, void* resp) {
|
||||
return codex_version(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexRevision(void* codexCtx, void* resp) {
|
||||
return codex_revision(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexRepo(void* codexCtx, void* resp) {
|
||||
return codex_repo(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexSpr(void* codexCtx, void* resp) {
|
||||
return codex_spr(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexPeerId(void* codexCtx, void* resp) {
|
||||
return codex_peer_id(codexCtx, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexUploadInit(void* codexCtx, char* filepath, size_t chunkSize, void* resp) {
|
||||
return codex_upload_init(codexCtx, filepath, chunkSize, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexUploadChunk(void* codexCtx, char* sessionId, const uint8_t* chunk, size_t len, void* resp) {
|
||||
return codex_upload_chunk(codexCtx, sessionId, chunk, len, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexUploadFinalize(void* codexCtx, char* sessionId, void* resp) {
|
||||
return codex_upload_finalize(codexCtx, sessionId, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexUploadCancel(void* codexCtx, char* sessionId, void* resp) {
|
||||
return codex_upload_cancel(codexCtx, sessionId, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexUploadFile(void* codexCtx, char* sessionId, void* resp) {
|
||||
return codex_upload_file(codexCtx, sessionId, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexLogLevel(void* codexCtx, char* logLevel, void* resp) {
|
||||
return codex_log_level(codexCtx, logLevel, (CodexCallback) callback, resp);
|
||||
}
|
||||
|
||||
static int cGoCodexExists(void* codexCtx, char* cid, void* resp) {
|
||||
return codex_storage_exists(codexCtx, cid, (CodexCallback) callback, resp);
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/cgo"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type LogFormat string
|
||||
|
||||
const (
|
||||
LogFormatAuto LogFormat = "auto"
|
||||
LogFormatColors LogFormat = "colors"
|
||||
LogFormatNoColors LogFormat = "nocolors"
|
||||
LogFormatJSON LogFormat = "json"
|
||||
)
|
||||
|
||||
type RepoKind string
|
||||
|
||||
const (
|
||||
FS RepoKind = "fs"
|
||||
SQLite RepoKind = "sqlite"
|
||||
LevelDb RepoKind = "leveldb"
|
||||
)
|
||||
|
||||
const defaultBlockSize = 1024 * 64
|
||||
|
||||
type Config struct {
|
||||
// Default: INFO
|
||||
LogLevel string `json:"log-level,omitempty"`
|
||||
|
||||
// Specifies what kind of logs should be written to stdout
|
||||
// Default: auto
|
||||
LogFormat LogFormat `json:"log-format,omitempty"`
|
||||
|
||||
// Enable the metrics server
|
||||
// Default: false
|
||||
MetricsEnabled bool `json:"metrics,omitempty"`
|
||||
|
||||
// Listening address of the metrics server
|
||||
// Default: 127.0.0.1
|
||||
MetricsAddress string `json:"metrics-address,omitempty"`
|
||||
|
||||
// Listening HTTP port of the metrics server
|
||||
// Default: 8008
|
||||
MetricsPort int `json:"metrics-port,omitempty"`
|
||||
|
||||
// The directory where codex will store configuration and data
|
||||
// Default:
|
||||
// $HOME\AppData\Roaming\Codex on Windows
|
||||
// $HOME/Library/Application Support/Codex on macOS
|
||||
// $HOME/.cache/codex on Linux
|
||||
DataDir string `json:"data-dir,omitempty"`
|
||||
|
||||
// Multi Addresses to listen on
|
||||
// Default: ["/ip4/0.0.0.0/tcp/0"]
|
||||
ListenAddrs []string `json:"listen-addrs,omitempty"`
|
||||
|
||||
// Specify method to use for determining public address.
|
||||
// Must be one of: any, none, upnp, pmp, extip:<IP>
|
||||
// Default: any
|
||||
Nat string `json:"nat,omitempty"`
|
||||
|
||||
// Discovery (UDP) port
|
||||
// Default: 8090
|
||||
DiscoveryPort int `json:"disc-port,omitempty"`
|
||||
|
||||
// Source of network (secp256k1) private key file path or name
|
||||
// Default: "key"
|
||||
NetPrivKeyFile string `json:"net-privkey,omitempty"`
|
||||
|
||||
// Specifies one or more bootstrap nodes to use when connecting to the network.
|
||||
BootstrapNodes []string `json:"bootstrap-node,omitempty"`
|
||||
|
||||
// The maximum number of peers to connect to.
|
||||
// Default: 160
|
||||
MaxPeers int `json:"max-peers,omitempty"`
|
||||
|
||||
// Number of worker threads ("0" = use as many threads as there are CPU cores available)
|
||||
// Default: 0
|
||||
NumThreads int `json:"num-threads,omitempty"`
|
||||
|
||||
// Node agent string which is used as identifier in network
|
||||
// Default: "Codex"
|
||||
AgentString string `json:"agent-string,omitempty"`
|
||||
|
||||
// Backend for main repo store (fs, sqlite, leveldb)
|
||||
// Default: fs
|
||||
RepoKind RepoKind `json:"repo-kind,omitempty"`
|
||||
|
||||
// The size of the total storage quota dedicated to the node
|
||||
// Default: 20 GiBs
|
||||
StorageQuota int `json:"storage-quota,omitempty"`
|
||||
|
||||
// Default block timeout in seconds - 0 disables the ttl
|
||||
// Default: 30 days
|
||||
BlockTtl int `json:"block-ttl,omitempty"`
|
||||
|
||||
// Time interval in seconds - determines frequency of block
|
||||
// maintenance cycle: how often blocks are checked for expiration and cleanup
|
||||
// Default: 10 minutes
|
||||
BlockMaintenanceInterval int `json:"block-mi,omitempty"`
|
||||
|
||||
// Number of blocks to check every maintenance cycle
|
||||
// Default: 1000
|
||||
BlockMaintenanceNumberOfBlocks int `json:"block-mn,omitempty"`
|
||||
|
||||
// Number of times to retry fetching a block before giving up
|
||||
// Default: 3000
|
||||
BlockRetries int `json:"block-retries,omitempty"`
|
||||
|
||||
// The size of the block cache, 0 disables the cache -
|
||||
// might help on slow hard drives
|
||||
// Default: 0
|
||||
CacheSize int `json:"cache-size,omitempty"`
|
||||
|
||||
// Default: "" (no log file)
|
||||
LogFile string `json:"log-file,omitempty"`
|
||||
}
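// For illustration (hypothetical values): Config{LogLevel: "WARN", DataDir: "/tmp/data-dir"}
// marshals to {"log-level":"WARN","data-dir":"/tmp/data-dir"} thanks to the omitempty tags;
// that JSON string is what New passes to codex_new as the configJson argument.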
|
||||
|
||||
type CodexNode struct {
|
||||
ctx unsafe.Pointer
|
||||
}
|
||||
|
||||
type ChunkSize int
|
||||
|
||||
func (c ChunkSize) valOrDefault() int {
|
||||
if c == 0 {
|
||||
return defaultBlockSize
|
||||
}
|
||||
|
||||
return int(c)
|
||||
}
|
||||
|
||||
func (c ChunkSize) toSizeT() C.size_t {
|
||||
return C.size_t(c.valOrDefault())
|
||||
}
|
||||
|
||||
// bridgeCtx is used for managing the C-Go bridge calls.
|
||||
// It contains a wait group for synchronizing the calls,
|
||||
// a cgo.Handle for passing context to the C code,
|
||||
// a response pointer for receiving data from the C code,
|
||||
// and fields for storing the result and error of the call.
|
||||
type bridgeCtx struct {
|
||||
wg *sync.WaitGroup
|
||||
h cgo.Handle
|
||||
resp unsafe.Pointer
|
||||
result string
|
||||
err error
|
||||
|
||||
// Callback used for receiving progress updates during upload/download.
|
||||
//
|
||||
// For the upload, the bytes parameter indicates the number of bytes uploaded.
|
||||
// If the chunk size is greater than or equal to the block size (passed in the init function),
|
||||
// the callback will be called when a block is put in the store.
|
||||
// Otherwise, it will be called when a chunk is pushed into the stream.
|
||||
//
|
||||
// For the download, the bytes parameter is the size of the chunk received, and the chunk
|
||||
// is the actual chunk of data received.
|
||||
onProgress func(bytes int, chunk []byte)
|
||||
}
|
||||
|
||||
// newBridgeCtx creates a new bridge context for managing C-Go calls.
|
||||
// The bridge context is initialized with a wait group and a cgo.Handle.
|
||||
func newBridgeCtx() *bridgeCtx {
|
||||
bridge := &bridgeCtx{}
|
||||
bridge.wg = &sync.WaitGroup{}
|
||||
bridge.wg.Add(1)
|
||||
bridge.h = cgo.NewHandle(bridge)
|
||||
bridge.resp = C.allocResp(C.uintptr_t(uintptr(bridge.h)))
|
||||
return bridge
|
||||
}
|
||||
|
||||
// callError creates an error message for a failed C-Go call.
|
||||
func (b *bridgeCtx) callError(name string) error {
|
||||
return fmt.Errorf("failed the call to %s returned code %d", name, C.getRet(b.resp))
|
||||
}
|
||||
|
||||
// free releases the resources associated with the bridge context,
|
||||
// including the cgo.Handle and the response pointer.
|
||||
func (b *bridgeCtx) free() {
|
||||
if b.h > 0 {
|
||||
b.h.Delete()
|
||||
b.h = 0
|
||||
}
|
||||
|
||||
if b.resp != nil {
|
||||
C.freeResp(b.resp)
|
||||
b.resp = nil
|
||||
}
|
||||
}
|
||||
|
||||
// callback is the function called by the C code to communicate back to Go.
|
||||
// It handles progress updates, successful completions, and errors.
|
||||
// The function uses the response pointer to retrieve the bridge context
|
||||
// and update its state accordingly.
|
||||
//
|
||||
//export callback
|
||||
func callback(ret C.int, msg *C.char, len C.size_t, resp unsafe.Pointer) {
|
||||
if resp == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m := (*C.Resp)(resp)
|
||||
m.ret = ret
|
||||
m.msg = msg
|
||||
m.len = len
|
||||
|
||||
if m.h == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
h := cgo.Handle(m.h)
|
||||
if h == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if v, ok := h.Value().(*bridgeCtx); ok {
|
||||
switch ret {
|
||||
case C.RET_PROGRESS:
|
||||
if v.onProgress == nil {
|
||||
return
|
||||
}
|
||||
if msg != nil {
|
||||
chunk := C.GoBytes(unsafe.Pointer(msg), C.int(len))
|
||||
v.onProgress(int(C.int(len)), chunk)
|
||||
} else {
|
||||
v.onProgress(int(C.int(len)), nil)
|
||||
}
|
||||
case C.RET_OK:
|
||||
retMsg := C.GoStringN(msg, C.int(len))
|
||||
v.result = retMsg
|
||||
v.err = nil
|
||||
if v.wg != nil {
|
||||
v.wg.Done()
|
||||
}
|
||||
case C.RET_ERR:
|
||||
retMsg := C.GoStringN(msg, C.int(len))
|
||||
v.err = errors.New(retMsg)
|
||||
if v.wg != nil {
|
||||
v.wg.Done()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// wait waits for the bridge context to complete its operation.
|
||||
// It returns the result and error of the operation.
|
||||
func (b *bridgeCtx) wait() (string, error) {
|
||||
b.wg.Wait()
|
||||
return b.result, b.err
|
||||
}
|
||||
|
||||
type OnUploadProgressFunc func(read, total int, percent float64, err error)
|
||||
|
||||
type UploadOptions struct {
|
||||
// Filepath can be the full path when using UploadFile
|
||||
// otherwise the file name.
|
||||
// It is used to detect the mimetype.
|
||||
Filepath string
|
||||
|
||||
// ChunkSize is the size of each upload chunk, passed as `blockSize` to the Codex node
|
||||
// store. Default is 64 KB.
|
||||
ChunkSize ChunkSize
|
||||
|
||||
// OnProgress is a callback function that is called after each chunk is uploaded with:
|
||||
// - read: the number of bytes read in the last chunk.
|
||||
// - total: the total number of bytes read so far.
|
||||
// - percent: the percentage of the total file size that has been uploaded. It is
|
||||
// determined from a `stat` call if it is a file and from the length of the buffer
|
||||
// if it is a buffer. Otherwise, it is 0.
|
||||
// - err: an error, if one occurred.
|
||||
//
|
||||
// If the chunk size is greater than or equal to the block size, the callback is called
|
||||
// after the block is actually stored in the block store. Otherwise, it is called
|
||||
// after the chunk is sent to the stream.
|
||||
OnProgress OnUploadProgressFunc
|
||||
}
|
||||
|
||||
func getReaderSize(r io.Reader) int64 {
|
||||
switch v := r.(type) {
|
||||
case *os.File:
|
||||
stat, err := v.Stat()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return stat.Size()
|
||||
case *bytes.Buffer:
|
||||
return int64(v.Len())
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Codex node with the provided configuration.
|
||||
// The node is not started automatically; you need to call CodexStart
|
||||
// to start it.
|
||||
// It returns a Codex node that can be used to interact
|
||||
// with the Codex network.
|
||||
func New(config Config) (*CodexNode, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
jsonConfig, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cJsonConfig := C.CString(string(jsonConfig))
|
||||
defer C.free(unsafe.Pointer(cJsonConfig))
|
||||
|
||||
ctx := C.cGoCodexNew(cJsonConfig, bridge.resp)
|
||||
|
||||
if _, err := bridge.wait(); err != nil {
|
||||
return nil, bridge.err
|
||||
}
|
||||
|
||||
return &CodexNode{ctx: ctx}, bridge.err
|
||||
}
|
||||
|
||||
// Start starts the Codex node.
|
||||
func (node CodexNode) Start() error {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexStart(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return bridge.callError("cGoCodexStart")
|
||||
}
|
||||
|
||||
_, err := bridge.wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// StartAsync is the asynchronous version of Start.
|
||||
func (node CodexNode) StartAsync(onDone func(error)) {
|
||||
go func() {
|
||||
err := node.Start()
|
||||
onDone(err)
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop stops the Codex node.
|
||||
func (node CodexNode) Stop() error {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexStop(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return bridge.callError("cGoCodexStop")
|
||||
}
|
||||
|
||||
_, err := bridge.wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// Destroy destroys the Codex node, freeing all resources.
|
||||
// The node must be stopped before calling this method.
|
||||
func (node CodexNode) Destroy() error {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexClose(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return bridge.callError("cGoCodexClose")
|
||||
}
|
||||
|
||||
_, err := bridge.wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if C.cGoCodexDestroy(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return errors.New("Failed to destroy the codex node.")
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Version returns the version of the Codex node.
|
||||
func (node CodexNode) Version() (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexVersion(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexVersion")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
func (node CodexNode) Revision() (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexRevision(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexRevision")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
// Repo returns the path of the data dir folder.
|
||||
func (node CodexNode) Repo() (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexRepo(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexRepo")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
func (node CodexNode) Spr() (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexSpr(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexSpr")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
func (node CodexNode) PeerId() (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if C.cGoCodexPeerId(node.ctx, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexPeerId")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
// UploadInit initializes a new upload session.
|
||||
// It returns a session ID that can be used for subsequent upload operations.
|
||||
// This function is called by UploadReader and UploadFile internally.
|
||||
// You should use this function only if you need to manage the upload session manually.
|
||||
func (node CodexNode) UploadInit(options *UploadOptions) (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
var cFilename = C.CString(options.Filepath)
|
||||
defer C.free(unsafe.Pointer(cFilename))
|
||||
|
||||
if C.cGoCodexUploadInit(node.ctx, cFilename, options.ChunkSize.toSizeT(), bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexUploadInit")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
// UploadChunk uploads a chunk of data to the Codex node.
|
||||
// It takes the session ID returned by UploadInit
|
||||
// and a byte slice containing the chunk data.
|
||||
// This function is called by UploadReader internally.
|
||||
// You should use this function only if you need to manage the upload session manually.
|
||||
func (node CodexNode) UploadChunk(sessionId string, chunk []byte) error {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
var cSessionId = C.CString(sessionId)
|
||||
defer C.free(unsafe.Pointer(cSessionId))
|
||||
|
||||
var cChunkPtr *C.uint8_t
|
||||
if len(chunk) > 0 {
|
||||
cChunkPtr = (*C.uint8_t)(unsafe.Pointer(&chunk[0]))
|
||||
}
|
||||
|
||||
if C.cGoCodexUploadChunk(node.ctx, cSessionId, cChunkPtr, C.size_t(len(chunk)), bridge.resp) != C.RET_OK {
|
||||
return bridge.callError("cGoCodexUploadChunk")
|
||||
}
|
||||
|
||||
_, err := bridge.wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// UploadFinalize finalizes the upload session and returns the CID of the uploaded file.
|
||||
// It takes the session ID returned by UploadInit.
|
||||
// This function is called by UploadReader and UploadFile internally.
|
||||
// You should use this function only if you need to manage the upload session manually.
|
||||
func (node CodexNode) UploadFinalize(sessionId string) (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
var cSessionId = C.CString(sessionId)
|
||||
defer C.free(unsafe.Pointer(cSessionId))
|
||||
|
||||
if C.cGoCodexUploadFinalize(node.ctx, cSessionId, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexUploadFinalize")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
// UploadCancel cancels an ongoing upload session.
|
||||
// It can be used only if the upload session is managed manually.
|
||||
// It doesn't work with UploadFile.
|
||||
func (node CodexNode) UploadCancel(sessionId string) error {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
var cSessionId = C.CString(sessionId)
|
||||
defer C.free(unsafe.Pointer(cSessionId))
|
||||
|
||||
if C.cGoCodexUploadCancel(node.ctx, cSessionId, bridge.resp) != C.RET_OK {
|
||||
return bridge.callError("cGoCodexUploadCancel")
|
||||
}
|
||||
|
||||
_, err := bridge.wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// UploadReader uploads data from an io.Reader to the Codex node.
|
||||
// It takes the upload options and the reader as parameters.
|
||||
// It returns the CID of the uploaded file or an error.
|
||||
//
|
||||
// Internally, it calls:
|
||||
// - UploadInit to create the upload session.
|
||||
// - UploadChunk to upload a chunk to codex.
|
||||
// - UploadFinalize to finalize the upload session.
|
||||
// - UploadCancel if an error occurs.
|
||||
func (node CodexNode) UploadReader(options UploadOptions, r io.Reader) (string, error) {
|
||||
sessionId, err := node.UploadInit(&options)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
buf := make([]byte, options.ChunkSize.valOrDefault())
|
||||
total := 0
|
||||
|
||||
var size int64
|
||||
if options.OnProgress != nil {
|
||||
size = getReaderSize(r)
|
||||
}
|
||||
|
||||
for {
|
||||
n, err := r.Read(buf)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if cancelErr := node.UploadCancel(sessionId); cancelErr != nil {
|
||||
return "", fmt.Errorf("failed to upload chunk %v and failed to cancel upload session %v", err, cancelErr)
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if err := node.UploadChunk(sessionId, buf[:n]); err != nil {
|
||||
if cancelErr := node.UploadCancel(sessionId); cancelErr != nil {
|
||||
return "", fmt.Errorf("failed to upload chunk %v and failed to cancel upload session %v", err, cancelErr)
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
total += n
|
||||
if options.OnProgress != nil && size > 0 {
|
||||
percent := float64(total) / float64(size) * 100.0
|
||||
// The last block could be a bit over the size due to padding
|
||||
// on the chunk size.
|
||||
if percent > 100.0 {
|
||||
percent = 100.0
|
||||
}
|
||||
options.OnProgress(n, total, percent, nil)
|
||||
} else if options.OnProgress != nil {
|
||||
options.OnProgress(n, total, 0, nil)
|
||||
}
|
||||
}
|
||||
|
||||
return node.UploadFinalize(sessionId)
|
||||
}
|
||||
|
||||
// UploadReaderAsync is the asynchronous version of UploadReader using a goroutine.
|
||||
func (node CodexNode) UploadReaderAsync(options UploadOptions, r io.Reader, onDone func(cid string, err error)) {
|
||||
go func() {
|
||||
cid, err := node.UploadReader(options, r)
|
||||
onDone(cid, err)
|
||||
}()
|
||||
}
|
||||
|
||||
// UploadFile uploads a file to the Codex node.
|
||||
// It takes the upload options as parameter.
|
||||
// It returns the CID of the uploaded file or an error.
|
||||
//
|
||||
// The options parameter contains the following fields:
|
||||
// - filepath: the full path of the file to upload.
|
||||
// - chunkSize: the size of each upload chunk, passed as `blockSize` to the Codex node
|
||||
// store. Default is 64 KB.
|
||||
// - onProgress: a callback function that is called after each chunk is uploaded with:
|
||||
// - read: the number of bytes read in the last chunk.
|
||||
// - total: the total number of bytes read so far.
|
||||
// - percent: the percentage of the total file size that has been uploaded. It is
|
||||
// determined from a `stat` call.
|
||||
// - err: an error, if one occurred.
|
||||
//
|
||||
// If the chunk size is greater than or equal to the block size, the callback is called after
|
||||
// the block is actually stored in the block store. Otherwise, it is called after the chunk
|
||||
// is sent to the stream.
|
||||
//
|
||||
// Internally, it calls UploadInit to create the upload session.
|
||||
func (node CodexNode) UploadFile(options UploadOptions) (string, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
if options.OnProgress != nil {
|
||||
stat, err := os.Stat(options.Filepath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
size := stat.Size()
|
||||
total := 0
|
||||
|
||||
if size > 0 {
|
||||
bridge.onProgress = func(read int, _ []byte) {
|
||||
if read == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
total += read
|
||||
percent := float64(total) / float64(size) * 100.0
|
||||
// The last block could be a bit over the size due to padding
|
||||
// on the chunk size.
|
||||
if percent > 100.0 {
|
||||
percent = 100.0
|
||||
}
|
||||
|
||||
options.OnProgress(read, int(size), percent, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sessionId, err := node.UploadInit(&options)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var cSessionId = C.CString(sessionId)
|
||||
defer C.free(unsafe.Pointer(cSessionId))
|
||||
|
||||
if C.cGoCodexUploadFile(node.ctx, cSessionId, bridge.resp) != C.RET_OK {
|
||||
return "", bridge.callError("cGoCodexUploadFile")
|
||||
}
|
||||
|
||||
return bridge.wait()
|
||||
}
|
||||
|
||||
// UploadFileAsync is the asynchronous version of UploadFile using a goroutine.
|
||||
func (node CodexNode) UploadFileAsync(options UploadOptions, onDone func(cid string, err error)) {
|
||||
go func() {
|
||||
cid, err := node.UploadFile(options)
|
||||
onDone(cid, err)
|
||||
}()
|
||||
}
|
||||
|
||||
func (node CodexNode) UpdateLogLevel(logLevel string) error {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
var cLogLevel = C.CString(string(logLevel))
|
||||
defer C.free(unsafe.Pointer(cLogLevel))
|
||||
|
||||
if C.cGoCodexLogLevel(node.ctx, cLogLevel, bridge.resp) != C.RET_OK {
|
||||
return bridge.callError("cGoCodexLogLevel")
|
||||
}
|
||||
|
||||
_, err := bridge.wait()
|
||||
return err
|
||||
}
|
||||
|
||||
func (node CodexNode) Exists(cid string) (bool, error) {
|
||||
bridge := newBridgeCtx()
|
||||
defer bridge.free()
|
||||
|
||||
var cCid = C.CString(cid)
|
||||
defer C.free(unsafe.Pointer(cCid))
|
||||
|
||||
if C.cGoCodexExists(node.ctx, cCid, bridge.resp) != C.RET_OK {
|
||||
return false, bridge.callError("cGoCodexUploadCancel")
|
||||
}
|
||||
|
||||
result, err := bridge.wait()
|
||||
return result == "true", err
|
||||
}
|
||||
|
||||
func main() {
|
||||
dataDir := os.TempDir() + "/data-dir"
|
||||
|
||||
node, err := New(Config{
|
||||
BlockRetries: 5,
|
||||
LogLevel: "WARN",
|
||||
DataDir: dataDir,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create Codex node: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(dataDir)
|
||||
|
||||
if err := node.Start(); err != nil {
|
||||
log.Fatalf("Failed to start Codex node: %v", err)
|
||||
}
|
||||
log.Println("Codex node started")
|
||||
|
||||
version, err := node.Version()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get Codex version: %v", err)
|
||||
}
|
||||
log.Printf("Codex version: %s", version)
|
||||
|
||||
err = node.UpdateLogLevel("ERROR")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to update log level: %v", err)
|
||||
}
|
||||
|
||||
cid := "zDvZRwzmAkhzDRPH5EW242gJBNZ2T7aoH2v1fVH66FxXL4kSbvyM"
|
||||
exists, err := node.Exists(cid)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to check data existence: %v", err)
|
||||
}
|
||||
|
||||
if exists {
|
||||
log.Fatalf("The data should not exist")
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer([]byte("Hello World!"))
|
||||
size := buf.Len()
|
||||
cid, err = node.UploadReader(UploadOptions{Filepath: "hello.txt"}, buf)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to upload data: %v", err)
|
||||
}
|
||||
log.Printf("Uploaded data with CID: %s (size: %d bytes)", cid, len)
|
||||
|
||||
exists, err = node.Exists(cid)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to check data existence: %v", err)
|
||||
}
|
||||
|
||||
if !exists {
|
||||
log.Fatalf("The data should exist")
|
||||
}
|
||||
|
||||
// Wait for a SIGINT or SIGTERM signal
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-ch
|
||||
|
||||
if err := node.Stop(); err != nil {
|
||||
log.Fatalf("Failed to stop Codex node: %v", err)
|
||||
}
|
||||
log.Println("Codex node stopped")
|
||||
|
||||
if err := node.Destroy(); err != nil {
|
||||
log.Fatalf("Failed to destroy Codex node: %v", err)
|
||||
}
|
||||
}
|
||||
1
examples/golang/hello.txt
Normal file
1
examples/golang/hello.txt
Normal file
@ -0,0 +1 @@
|
||||
Hello World!
|
||||
37
library/README.md
Normal file
37
library/README.md
Normal file
@ -0,0 +1,37 @@
|
||||
# Codex Library

Codex exposes a C binding that serves as a stable contract, making it straightforward to integrate Codex into other languages such as Go.

The implementation was inspired by [nim-library-template](https://github.com/logos-co/nim-library-template)
and by the [nwaku](https://github.com/waku-org/nwaku/tree/master/library) library.

The source code contains detailed comments to explain the threading and callback flow.
The diagram below summarizes the lifecycle: context creation, request execution, and shutdown.

```mermaid
sequenceDiagram
    autonumber
    actor App as App/User
    participant Go as Go Wrapper
    participant C as C API (libcodex.h)
    participant Ctx as CodexContext
    participant Thr as Worker Thread
    participant Eng as CodexServer

    App->>Go: Start
    Go->>C: codex_start_node
    C->>Ctx: enqueue request
    C->>Ctx: fire signal
    Ctx->>Thr: wake worker
    Thr->>Ctx: dequeue request
    Thr-->>Ctx: ACK
    Ctx-->>C: forward ACK
    C-->>Go: RET OK
    Go->>App: Unblock
    Thr->>Eng: execute (async)
    Eng-->>Thr: result ready
    Thr-->>Ctx: callback
    Ctx-->>C: forward callback
    C-->>Go: forward callback
    Go-->>App: done
```
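
On the client side, each request callback receives one of the return codes declared in `libcodex.h`: `RET_OK`, `RET_ERR`, or `RET_PROGRESS` (upload/download progress). The Go wrapper in `examples/golang/codex.go` dispatches on them roughly like this (a trimmed sketch adapted from that file; the full cgo plumbing and nil checks live there):

```go
//export callback
func callback(ret C.int, msg *C.char, length C.size_t, resp unsafe.Pointer) {
	// Recover the per-request bridge context from the handle stored in the C Resp struct.
	bridge := cgo.Handle((*C.Resp)(resp).h).Value().(*bridgeCtx)
	switch ret {
	case C.RET_PROGRESS: // progress update: msg/length carry the chunk (if any)
		bridge.onProgress(int(length), C.GoBytes(unsafe.Pointer(msg), C.int(length)))
	case C.RET_OK: // request finished: msg/length carry the result
		bridge.result = C.GoStringN(msg, C.int(length))
		bridge.wg.Done()
	case C.RET_ERR: // request failed: msg/length carry the error message
		bridge.err = errors.New(C.GoStringN(msg, C.int(length)))
		bridge.wg.Done()
	}
}
```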
|
||||
42
library/alloc.nim
Normal file
42
library/alloc.nim
Normal file
@ -0,0 +1,42 @@
|
||||
## Can be shared safely between threads
|
||||
type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int]
|
||||
|
||||
proc alloc*(str: cstring): cstring =
|
||||
# Byte allocation from the given address.
|
||||
# The caller must manually deallocate the result with deallocShared.
|
||||
if str.isNil():
|
||||
var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator
|
||||
ret[0] = '\0' # Set the null terminator
|
||||
return ret
|
||||
|
||||
let ret = cast[cstring](allocShared(len(str) + 1))
|
||||
copyMem(ret, str, len(str) + 1)
|
||||
return ret
|
||||
|
||||
proc alloc*(str: string): cstring =
|
||||
## Byte allocation from the given address.
|
||||
## The caller must manually deallocate the result with deallocShared.
|
||||
var ret = cast[cstring](allocShared(str.len + 1))
|
||||
let s = cast[seq[char]](str)
|
||||
for i in 0 ..< str.len:
|
||||
ret[i] = s[i]
|
||||
ret[str.len] = '\0'
|
||||
return ret
|
||||
|
||||
proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] =
|
||||
let data = allocShared(sizeof(T) * s.len)
|
||||
if s.len != 0:
|
||||
copyMem(data, unsafeAddr s[0], sizeof(T) * s.len)
|
||||
return (cast[ptr UncheckedArray[T]](data), s.len)
|
||||
|
||||
proc deallocSharedSeq*[T](s: var SharedSeq[T]) =
|
||||
deallocShared(s.data)
|
||||
s.len = 0
|
||||
|
||||
proc toSeq*[T](s: SharedSeq[T]): seq[T] =
|
||||
## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
|
||||
## as seq[T] is a GC-managed type.
|
||||
var ret = newSeq[T]()
|
||||
for i in 0 ..< s.len:
|
||||
ret.add(s.data[i])
|
||||
return ret
|
||||
225
library/codex_context.nim
Normal file
225
library/codex_context.nim
Normal file
@ -0,0 +1,225 @@
|
||||
## This file defines the Codex context and its thread flow:
|
||||
## 1. Client enqueues a request and signals the Codex thread.
|
||||
## 2. The Codex thread dequeues the request and sends an ack (reqReceivedSignal).
|
||||
## 3. The Codex thread executes the request asynchronously.
|
||||
## 4. On completion, the Codex thread invokes the client callback with the result and userData.
|
||||
|
||||
{.pragma: exported, exportc, cdecl, raises: [].}
|
||||
{.pragma: callback, cdecl, raises: [], gcsafe.}
|
||||
{.passc: "-fPIC".}
|
||||
|
||||
import std/[options, locks, atomics]
|
||||
import chronicles
|
||||
import chronos
|
||||
import chronos/threadsync
|
||||
import taskpools/channels_spsc_single
|
||||
import ./ffi_types
|
||||
import ./codex_thread_requests/[codex_thread_request]
|
||||
|
||||
from ../codex/codex import CodexServer
|
||||
|
||||
logScope:
|
||||
topics = "codexlib"
|
||||
|
||||
type CodexContext* = object
|
||||
thread: Thread[(ptr CodexContext)]
|
||||
|
||||
# This lock is only necessary while we use a SP Channel and while the signalling
|
||||
# between threads assumes that there aren't concurrent requests.
|
||||
# Rearchitecting the signaling + migrating to a MP Channel will allow us to receive
|
||||
# requests concurrently and spare us the need of locks
|
||||
lock: Lock
|
||||
|
||||
# Channel to send requests to the Codex thread.
|
||||
# Requests will be popped from this channel.
|
||||
reqChannel: ChannelSPSCSingle[ptr CodexThreadRequest]
|
||||
|
||||
# To notify the Codex thread that a request is ready
|
||||
reqSignal: ThreadSignalPtr
|
||||
|
||||
# To notify the client thread that the request was received.
|
||||
# It is an acknowledgement signal (handshake).
|
||||
reqReceivedSignal: ThreadSignalPtr
|
||||
|
||||
# Custom state attached by the client to a request,
|
||||
# returned when its callback is invoked
|
||||
userData*: pointer
|
||||
|
||||
# Function called by the library to notify the client of global events
|
||||
eventCallback*: pointer
|
||||
|
||||
# Custom state attached by the client to the context,
|
||||
# returned with every event callback
|
||||
eventUserData*: pointer
|
||||
|
||||
# Set to false to stop the Codex thread (during codex_destroy)
|
||||
running: Atomic[bool]
|
||||
|
||||
template callEventCallback(ctx: ptr CodexContext, eventName: string, body: untyped) =
|
||||
## Template used to notify the client of global events
|
||||
## Example: onConnectionChanged, onProofMissing, etc.
|
||||
if isNil(ctx[].eventCallback):
|
||||
error eventName & " - eventCallback is nil"
|
||||
return
|
||||
|
||||
foreignThreadGc:
|
||||
try:
|
||||
let event = body
|
||||
cast[CodexCallback](ctx[].eventCallback)(
|
||||
RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
|
||||
)
|
||||
except CatchableError:
|
||||
let msg =
|
||||
"Exception " & eventName & " when calling 'eventCallBack': " &
|
||||
getCurrentExceptionMsg()
|
||||
cast[CodexCallback](ctx[].eventCallback)(
|
||||
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
|
||||
)
|
||||
|
||||
proc sendRequestToCodexThread*(
|
||||
ctx: ptr CodexContext,
|
||||
reqType: RequestType,
|
||||
reqContent: pointer,
|
||||
callback: CodexCallback,
|
||||
userData: pointer,
|
||||
timeout = InfiniteDuration,
|
||||
): Result[void, string] =
|
||||
ctx.lock.acquire()
|
||||
|
||||
defer:
|
||||
ctx.lock.release()
|
||||
|
||||
let req = CodexThreadRequest.createShared(reqType, reqContent, callback, userData)
|
||||
|
||||
# Send the request to the Codex thread
|
||||
let sentOk = ctx.reqChannel.trySend(req)
|
||||
if not sentOk:
  # Capture the request description before freeing it to avoid a use-after-free.
  let reqDescription = $req[]
  deallocShared(req)
  return err("Failed to send request to the codex thread: " & reqDescription)
|
||||
|
||||
# Notify the Codex thread that a request is available
|
||||
let fireSyncRes = ctx.reqSignal.fireSync()
|
||||
if fireSyncRes.isErr():
|
||||
deallocShared(req)
|
||||
return err(
|
||||
"Failed to send request to the codex thread: unable to fireSync: " &
|
||||
$fireSyncRes.error
|
||||
)
|
||||
|
||||
if fireSyncRes.get() == false:
|
||||
deallocShared(req)
|
||||
return err("Failed to send request to the codex thread: fireSync timed out.")
|
||||
|
||||
# Wait until the Codex Thread properly received the request
|
||||
let res = ctx.reqReceivedSignal.waitSync(timeout)
|
||||
if res.isErr():
|
||||
deallocShared(req)
|
||||
return err(
|
||||
"Failed to send request to the codex thread: unable to receive reqReceivedSignal signal."
|
||||
)
|
||||
|
||||
## Notice that in case of "ok", the deallocShared(req) is performed by the Codex Thread in the
|
||||
## process proc. See the 'codex_thread_request.nim' module for more details.
|
||||
ok()
|
||||
|
||||
proc runCodex(ctx: ptr CodexContext) {.async: (raises: []).} =
|
||||
var codex: CodexServer
|
||||
|
||||
while true:
|
||||
try:
|
||||
# Wait until a request is available
|
||||
await ctx.reqSignal.wait()
|
||||
except Exception as e:
|
||||
error "Failure in run codex thread while waiting for reqSignal.", error = e.msg
|
||||
continue
|
||||
|
||||
# If codex_destroy was called, exit the loop
|
||||
if ctx.running.load == false:
|
||||
break
|
||||
|
||||
var request: ptr CodexThreadRequest
|
||||
|
||||
# Pop a request from the channel
|
||||
let recvOk = ctx.reqChannel.tryRecv(request)
|
||||
if not recvOk:
|
||||
error "Failure in run codex: unable to receive request in codex thread."
|
||||
continue
|
||||
|
||||
# yield immediately to the event loop
|
||||
# with asyncSpawn only, the code will be executed
|
||||
# synchronously until the first await
|
||||
asyncSpawn (
|
||||
proc() {.async.} =
|
||||
await sleepAsync(0)
|
||||
await CodexThreadRequest.process(request, addr codex)
|
||||
)()
|
||||
|
||||
# Notify the main thread that we picked up the request
|
||||
let fireRes = ctx.reqReceivedSignal.fireSync()
|
||||
if fireRes.isErr():
|
||||
error "Failure in run codex: unable to fire back to requester thread.",
|
||||
error = fireRes.error
|
||||
|
||||
proc run(ctx: ptr CodexContext) {.thread.} =
|
||||
waitFor runCodex(ctx)
|
||||
|
||||
proc createCodexContext*(): Result[ptr CodexContext, string] =
|
||||
## This proc is called from the main thread and it creates
|
||||
## the Codex working thread.
|
||||
|
||||
# Allocates a CodexContext in shared memory (for the main thread)
|
||||
var ctx = createShared(CodexContext, 1)
|
||||
|
||||
# This signal is used by the main side to wake the Codex thread
|
||||
# when a new request is enqueued.
|
||||
ctx.reqSignal = ThreadSignalPtr.new().valueOr:
|
||||
return
|
||||
err("Failed to create a context: unable to create reqSignal ThreadSignalPtr.")
|
||||
|
||||
# Used to let the caller know that the Codex thread has
|
||||
# acknowledged / picked up a request (like a handshake).
|
||||
ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
|
||||
return err(
|
||||
"Failed to create codex context: unable to create reqReceivedSignal ThreadSignalPtr."
|
||||
)
|
||||
|
||||
# Protects shared state inside CodexContext
|
||||
ctx.lock.initLock()
|
||||
|
||||
# Codex thread will loop until codex_destroy is called
|
||||
ctx.running.store(true)
|
||||
|
||||
try:
|
||||
createThread(ctx.thread, run, ctx)
|
||||
except ValueError, ResourceExhaustedError:
|
||||
freeShared(ctx)
|
||||
return err(
|
||||
"Failed to create codex context: unable to create thread: " &
|
||||
getCurrentExceptionMsg()
|
||||
)
|
||||
|
||||
return ok(ctx)
|
||||
|
||||
proc destroyCodexContext*(ctx: ptr CodexContext): Result[void, string] =
|
||||
# Signal the Codex thread to stop
|
||||
ctx.running.store(false)
|
||||
|
||||
# Wake the worker up if it's waiting
|
||||
let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
|
||||
return err("Failed to destroy codex context: " & $error)
|
||||
|
||||
if not signaledOnTime:
|
||||
return err(
|
||||
"Failed to destroy codex context: unable to get signal reqSignal on time in destroyCodexContext."
|
||||
)
|
||||
|
||||
# Wait for the thread to finish
|
||||
joinThread(ctx.thread)
|
||||
|
||||
# Clean up
|
||||
ctx.lock.deinitLock()
|
||||
?ctx.reqSignal.close()
|
||||
?ctx.reqReceivedSignal.close()
|
||||
freeShared(ctx)
|
||||
|
||||
return ok()
|
||||
126
library/codex_thread_requests/codex_thread_request.nim
Normal file
126
library/codex_thread_requests/codex_thread_request.nim
Normal file
@ -0,0 +1,126 @@
|
||||
## This file contains the base message request type that will be handled.
|
||||
## The requests are created by the main thread and processed by
|
||||
## the Codex Thread.
|
||||
|
||||
import std/json
|
||||
import results
|
||||
import chronos
|
||||
import ../ffi_types
|
||||
import ./requests/node_lifecycle_request
|
||||
import ./requests/node_info_request
|
||||
import ./requests/node_debug_request
|
||||
import ./requests/node_p2p_request
|
||||
import ./requests/node_upload_request
|
||||
import ./requests/node_download_request
|
||||
import ./requests/node_storage_request
|
||||
|
||||
from ../../codex/codex import CodexServer
|
||||
|
||||
type RequestType* {.pure.} = enum
|
||||
LIFECYCLE
|
||||
INFO
|
||||
DEBUG
|
||||
P2P
|
||||
UPLOAD
|
||||
DOWNLOAD
|
||||
STORAGE
|
||||
|
||||
type CodexThreadRequest* = object
|
||||
reqType: RequestType
|
||||
|
||||
# Request payload
|
||||
reqContent: pointer
|
||||
|
||||
# Callback to notify the client thread of the result
|
||||
callback: CodexCallback
|
||||
|
||||
# Custom state attached by the client to the request,
|
||||
# returned when its callback is invoked.
|
||||
userData: pointer
|
||||
|
||||
proc createShared*(
|
||||
T: type CodexThreadRequest,
|
||||
reqType: RequestType,
|
||||
reqContent: pointer,
|
||||
callback: CodexCallback,
|
||||
userData: pointer,
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].reqType = reqType
|
||||
ret[].reqContent = reqContent
|
||||
ret[].callback = callback
|
||||
ret[].userData = userData
|
||||
return ret
|
||||
|
||||
# NOTE: User callbacks are executed on the working thread.
|
||||
# They must be fast and non-blocking; otherwise this thread will be blocked
|
||||
# and no further requests can be processed.
|
||||
# We can improve this by dispatching the callbacks to a thread pool or
|
||||
# moving to a MP channel.
|
||||
# See: https://github.com/codex-storage/nim-codex/pull/1322#discussion_r2340708316
|
||||
proc handleRes[T: string | void | seq[byte]](
|
||||
res: Result[T, string], request: ptr CodexThreadRequest
|
||||
) =
|
||||
## Handles the Result responses, which can either be Result[string, string] or
|
||||
## Result[void, string].
|
||||
defer:
|
||||
deallocShared(request)
|
||||
|
||||
if res.isErr():
|
||||
foreignThreadGc:
|
||||
let msg = $res.error
|
||||
if msg == "":
|
||||
request[].callback(RET_ERR, nil, cast[csize_t](0), request[].userData)
|
||||
else:
|
||||
request[].callback(
|
||||
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
|
||||
)
|
||||
return
|
||||
|
||||
foreignThreadGc:
|
||||
var msg: cstring = ""
|
||||
when T is string:
|
||||
msg = res.get().cstring()
|
||||
request[].callback(
|
||||
RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
|
||||
)
|
||||
return
|
||||
|
||||
proc process*(
|
||||
T: type CodexThreadRequest, request: ptr CodexThreadRequest, codex: ptr CodexServer
|
||||
) {.async: (raises: []).} =
|
||||
## Processes the request in the Codex thread.
|
||||
## Dispatch to the appropriate request handler based on reqType.
|
||||
let retFut =
|
||||
case request[].reqType
|
||||
of LIFECYCLE:
|
||||
cast[ptr NodeLifecycleRequest](request[].reqContent).process(codex)
|
||||
of INFO:
|
||||
cast[ptr NodeInfoRequest](request[].reqContent).process(codex)
|
||||
of RequestType.DEBUG:
|
||||
cast[ptr NodeDebugRequest](request[].reqContent).process(codex)
|
||||
of P2P:
|
||||
cast[ptr NodeP2PRequest](request[].reqContent).process(codex)
|
||||
of STORAGE:
|
||||
cast[ptr NodeStorageRequest](request[].reqContent).process(codex)
|
||||
of DOWNLOAD:
|
||||
let onChunk = proc(bytes: seq[byte]) =
|
||||
if bytes.len > 0:
|
||||
request[].callback(
|
||||
RET_PROGRESS,
|
||||
cast[ptr cchar](unsafeAddr bytes[0]),
|
||||
cast[csize_t](bytes.len),
|
||||
request[].userData,
|
||||
)
|
||||
|
||||
cast[ptr NodeDownloadRequest](request[].reqContent).process(codex, onChunk)
|
||||
of UPLOAD:
|
||||
let onBlockReceived = proc(bytes: int) =
|
||||
request[].callback(RET_PROGRESS, nil, cast[csize_t](bytes), request[].userData)
|
||||
|
||||
cast[ptr NodeUploadRequest](request[].reqContent).process(codex, onBlockReceived)
|
||||
|
||||
handleRes(await retFut, request)
|
||||
|
||||
proc `$`*(self: CodexThreadRequest): string =
|
||||
return $self.reqType
|
||||
126
library/codex_thread_requests/requests/node_debug_request.nim
Normal file
126
library/codex_thread_requests/requests/node_debug_request.nim
Normal file
@ -0,0 +1,126 @@
|
||||
{.push raises: [].}
|
||||
|
||||
## This file contains the debug info available with Codex.
|
||||
## The DEBUG type will return info about the P2P node.
|
||||
## The PEER type is available only with codex_enable_api_debug_peers flag.
|
||||
## It will return info about a specific peer if available.
|
||||
|
||||
import std/[options]
|
||||
import chronos
|
||||
import chronicles
|
||||
import codexdht/discv5/spr
|
||||
import ../../alloc
|
||||
import ../../../codex/conf
|
||||
import ../../../codex/rest/json
|
||||
import ../../../codex/node
|
||||
|
||||
from ../../../codex/codex import CodexServer, node
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexlibdebug"
|
||||
|
||||
type NodeDebugMsgType* = enum
|
||||
DEBUG
|
||||
PEER
|
||||
LOG_LEVEL
|
||||
|
||||
type NodeDebugRequest* = object
|
||||
operation: NodeDebugMsgType
|
||||
peerId: cstring
|
||||
logLevel: cstring
|
||||
|
||||
proc createShared*(
|
||||
T: type NodeDebugRequest,
|
||||
op: NodeDebugMsgType,
|
||||
peerId: cstring = "",
|
||||
logLevel: cstring = "",
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
ret[].peerId = peerId.alloc()
|
||||
ret[].logLevel = logLevel.alloc()
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeDebugRequest) =
|
||||
deallocShared(self[].peerId)
|
||||
deallocShared(self[].logLevel)
|
||||
deallocShared(self)
|
||||
|
||||
proc getDebug(
|
||||
codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let node = codex[].node
|
||||
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
|
||||
|
||||
let json =
|
||||
%*{
|
||||
"id": $node.switch.peerInfo.peerId,
|
||||
"addrs": node.switch.peerInfo.addrs.mapIt($it),
|
||||
"spr":
|
||||
if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "",
|
||||
"announceAddresses": node.discovery.announceAddrs,
|
||||
"table": table,
|
||||
}
|
||||
|
||||
return ok($json)
|
||||
|
||||
proc getPeer(
|
||||
codex: ptr CodexServer, peerId: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
when codex_enable_api_debug_peers:
|
||||
let node = codex[].node
|
||||
let res = PeerId.init($peerId)
|
||||
if res.isErr:
|
||||
return err("Failed to get peer: invalid peer ID " & $peerId & ": " & $res.error())
|
||||
|
||||
let id = res.get()
|
||||
|
||||
try:
|
||||
let peerRecord = await node.findPeer(id)
|
||||
if peerRecord.isNone:
|
||||
return err("Failed to get peer: peer not found")
|
||||
|
||||
return ok($ %RestPeerRecord.init(peerRecord.get()))
|
||||
except CancelledError:
|
||||
return err("Failed to get peer: operation cancelled")
|
||||
except CatchableError as e:
|
||||
return err("Failed to get peer: " & e.msg)
|
||||
else:
|
||||
return err("Failed to get peer: peer debug API is disabled")
|
||||
|
||||
proc updateLogLevel(
|
||||
codex: ptr CodexServer, logLevel: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
try:
|
||||
{.gcsafe.}:
|
||||
updateLogLevel($logLevel)
|
||||
except ValueError as err:
|
||||
return err("Failed to update log level: invalid value for log level: " & err.msg)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeDebugRequest, codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of NodeDebugMsgType.DEBUG:
|
||||
let res = (await getDebug(codex))
|
||||
if res.isErr:
|
||||
error "Failed to get DEBUG.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeDebugMsgType.PEER:
|
||||
let res = (await getPeer(codex, self.peerId))
|
||||
if res.isErr:
|
||||
error "Failed to get PEER.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeDebugMsgType.LOG_LEVEL:
|
||||
let res = (await updateLogLevel(codex, self.logLevel))
|
||||
if res.isErr:
|
||||
error "Failed to update LOG_LEVEL.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
336
library/codex_thread_requests/requests/node_download_request.nim
Normal file
336
library/codex_thread_requests/requests/node_download_request.nim
Normal file
@ -0,0 +1,336 @@
|
||||
{.push raises: [].}
|
||||
|
||||
## This file contains the download request.
|
||||
## A session is created for each download identified by the CID,
|
||||
## allowing to resume, pause and cancel the download (using chunks).
|
||||
##
|
||||
## There are two ways to download a file:
|
||||
## 1. Via chunks: the cid parameter is the CID of the file to download. Steps are:
|
||||
## - INIT: initializes the download session
|
||||
## - CHUNK: downloads the next chunk of the file
|
||||
## - CANCEL: cancels the download session
|
||||
## 2. Via stream.
|
||||
## - INIT: initializes the download session
|
||||
## - STREAM: downloads the file in a streaming manner, calling
|
||||
## the onChunk handler for each chunk and / or writing to a file if filepath is set.
|
||||
## - CANCEL: cancels the download session
|
||||
|
||||
import std/[options, streams, tables]
|
||||
import chronos
|
||||
import chronicles
|
||||
import libp2p/stream/[lpstream]
|
||||
import serde/json as serde
|
||||
import ../../alloc
|
||||
import ../../../codex/units
|
||||
import ../../../codex/codextypes
|
||||
|
||||
from ../../../codex/codex import CodexServer, node
|
||||
from ../../../codex/node import retrieve, fetchManifest
|
||||
from ../../../codex/rest/json import `%`, RestContent
|
||||
from libp2p import Cid, init, `$`
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexlibdownload"
|
||||
|
||||
type NodeDownloadMsgType* = enum
|
||||
INIT
|
||||
CHUNK
|
||||
STREAM
|
||||
CANCEL
|
||||
MANIFEST
|
||||
|
||||
type OnChunkHandler = proc(bytes: seq[byte]): void {.gcsafe, raises: [].}
|
||||
|
||||
type NodeDownloadRequest* = object
|
||||
operation: NodeDownloadMsgType
|
||||
cid: cstring
|
||||
chunkSize: csize_t
|
||||
local: bool
|
||||
filepath: cstring
|
||||
|
||||
type
|
||||
DownloadSessionId* = string
|
||||
DownloadSessionCount* = int
|
||||
DownloadSession* = object
|
||||
stream: LPStream
|
||||
chunkSize: int
|
||||
|
||||
var downloadSessions {.threadvar.}: Table[DownloadSessionId, DownloadSession]
|
||||
|
||||
proc createShared*(
|
||||
T: type NodeDownloadRequest,
|
||||
op: NodeDownloadMsgType,
|
||||
cid: cstring = "",
|
||||
chunkSize: csize_t = 0,
|
||||
local: bool = false,
|
||||
filepath: cstring = "",
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
ret[].cid = cid.alloc()
|
||||
ret[].chunkSize = chunkSize
|
||||
ret[].local = local
|
||||
ret[].filepath = filepath.alloc()
|
||||
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeDownloadRequest) =
|
||||
deallocShared(self[].cid)
|
||||
deallocShared(self[].filepath)
|
||||
deallocShared(self)
|
||||
|
||||
proc init(
|
||||
codex: ptr CodexServer, cCid: cstring = "", chunkSize: csize_t = 0, local: bool
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Init a new session to download the file identified by cid.
|
||||
##
|
||||
## If the session already exists, do nothing and return ok.
|
||||
## Meaning that a cid can only have one active download session.
|
||||
## If the chunkSize is 0, the default block size will be used.
|
||||
## If local is true, the file will be retrieved from the local store.
|
||||
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to download locally: cannot parse cid: " & $cCid)
|
||||
|
||||
if downloadSessions.contains($cid):
|
||||
return ok("Download session already exists.")
|
||||
|
||||
let node = codex[].node
|
||||
var stream: LPStream
|
||||
|
||||
try:
|
||||
let res = await node.retrieve(cid.get(), local)
|
||||
if res.isErr():
|
||||
return err("Failed to init the download: " & res.error.msg)
|
||||
stream = res.get()
|
||||
except CancelledError:
|
||||
downloadSessions.del($cid)
|
||||
return err("Failed to init the download: download cancelled.")
|
||||
|
||||
let blockSize = if chunkSize.int > 0: chunkSize.int else: DefaultBlockSize.int
|
||||
downloadSessions[$cid] = DownloadSession(stream: stream, chunkSize: blockSize)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc chunk(
|
||||
codex: ptr CodexServer, cCid: cstring = "", onChunk: OnChunkHandler
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Download the next chunk of the file identified by cid.
|
||||
## The chunk is passed to the onChunk handler.
|
||||
##
|
||||
## If the stream is at EOF, return ok with empty string.
|
||||
##
|
||||
## If an error is raised while reading the stream, the session is deleted
|
||||
## and an error is returned.
|
||||
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to download locally: cannot parse cid: " & $cCid)
|
||||
|
||||
if not downloadSessions.contains($cid):
|
||||
return err("Failed to download chunk: no session for cid " & $cid)
|
||||
|
||||
var session: DownloadSession
|
||||
try:
|
||||
session = downloadSessions[$cid]
|
||||
except KeyError:
|
||||
return err("Failed to download chunk: no session for cid " & $cid)
|
||||
|
||||
let stream = session.stream
|
||||
if stream.atEof:
|
||||
return ok("")
|
||||
|
||||
let chunkSize = session.chunkSize
|
||||
var buf = newSeq[byte](chunkSize)
|
||||
|
||||
try:
|
||||
let read = await stream.readOnce(addr buf[0], buf.len)
|
||||
buf.setLen(read)
|
||||
except LPStreamError as e:
|
||||
await stream.close()
|
||||
downloadSessions.del($cid)
|
||||
return err("Failed to download chunk: " & $e.msg)
|
||||
except CancelledError:
|
||||
await stream.close()
|
||||
downloadSessions.del($cid)
|
||||
return err("Failed to download chunk: download cancelled.")
|
||||
|
||||
if buf.len <= 0:
|
||||
return err("Failed to download chunk: no data")
|
||||
|
||||
onChunk(buf)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc streamData(
|
||||
codex: ptr CodexServer,
|
||||
stream: LPStream,
|
||||
onChunk: OnChunkHandler,
|
||||
chunkSize: csize_t,
|
||||
filepath: cstring,
|
||||
): Future[Result[string, string]] {.
|
||||
async: (raises: [CancelledError, LPStreamError, IOError])
|
||||
.} =
|
||||
let blockSize = if chunkSize.int > 0: chunkSize.int else: DefaultBlockSize.int
|
||||
var buf = newSeq[byte](blockSize)
|
||||
var read = 0
|
||||
var outputStream: OutputStreamHandle
|
||||
var filedest: string = $filepath
|
||||
|
||||
try:
|
||||
if filepath != "":
|
||||
outputStream = filedest.fileOutput()
|
||||
|
||||
while not stream.atEof:
|
||||
## Yield immediately to the event loop
|
||||
## It gives a chance to cancel request to be processed
|
||||
await sleepAsync(0)
|
||||
|
||||
let read = await stream.readOnce(addr buf[0], buf.len)
|
||||
buf.setLen(read)
|
||||
|
||||
if buf.len <= 0:
|
||||
break
|
||||
|
||||
onChunk(buf)
|
||||
|
||||
if outputStream != nil:
|
||||
outputStream.write(buf)
|
||||
|
||||
if outputStream != nil:
|
||||
outputStream.close()
|
||||
finally:
|
||||
if outputStream != nil:
|
||||
outputStream.close()
|
||||
|
||||
return ok("")
|
||||
|
||||
proc stream(
|
||||
codex: ptr CodexServer,
|
||||
cCid: cstring,
|
||||
chunkSize: csize_t,
|
||||
local: bool,
|
||||
filepath: cstring,
|
||||
onChunk: OnChunkHandler,
|
||||
): Future[Result[string, string]] {.raises: [], async: (raises: []).} =
|
||||
## Stream the file identified by cid, calling the onChunk handler for each chunk
|
||||
## and / or writing to a file if filepath is set.
|
||||
##
|
||||
## If local is true, the file will be retrieved from the local store.
|
||||
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to stream: cannot parse cid: " & $cCid)
|
||||
|
||||
if not downloadSessions.contains($cid):
|
||||
return err("Failed to stream: no session for cid " & $cid)
|
||||
|
||||
var session: DownloadSession
|
||||
try:
|
||||
session = downloadSessions[$cid]
|
||||
except KeyError:
|
||||
return err("Failed to stream: no session for cid " & $cid)
|
||||
|
||||
let node = codex[].node
|
||||
|
||||
try:
|
||||
let res =
|
||||
await noCancel codex.streamData(session.stream, onChunk, chunkSize, filepath)
|
||||
if res.isErr:
|
||||
return err($res.error)
|
||||
except LPStreamError as e:
|
||||
return err("Failed to stream file: " & $e.msg)
|
||||
except IOError as e:
|
||||
return err("Failed to stream file: " & $e.msg)
|
||||
finally:
|
||||
if session.stream != nil:
|
||||
await session.stream.close()
|
||||
downloadSessions.del($cid)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc cancel(
|
||||
codex: ptr CodexServer, cCid: cstring
|
||||
): Future[Result[string, string]] {.raises: [], async: (raises: []).} =
|
||||
## Cancel the download session identified by cid.
|
||||
## This operation is not supported when using the stream mode,
|
||||
## because the worker will be busy downloading the file.
|
||||
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to cancel : cannot parse cid: " & $cCid)
|
||||
|
||||
if not downloadSessions.contains($cid):
|
||||
# The session is already cancelled
|
||||
return ok("")
|
||||
|
||||
var session: DownloadSession
|
||||
try:
|
||||
session = downloadSessions[$cid]
|
||||
except KeyError:
|
||||
# The session is already cancelled
|
||||
return ok("")
|
||||
|
||||
let stream = session.stream
|
||||
await stream.close()
|
||||
downloadSessions.del($cCid)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc manifest(
|
||||
codex: ptr CodexServer, cCid: cstring
|
||||
): Future[Result[string, string]] {.raises: [], async: (raises: []).} =
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to fetch manifest: cannot parse cid: " & $cCid)
|
||||
|
||||
try:
|
||||
let node = codex[].node
|
||||
let manifest = await node.fetchManifest(cid.get())
|
||||
if manifest.isErr:
|
||||
return err("Failed to fetch manifest: " & manifest.error.msg)
|
||||
|
||||
return ok(serde.toJson(manifest.get()))
|
||||
except CancelledError:
|
||||
return err("Failed to fetch manifest: download cancelled.")
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeDownloadRequest, codex: ptr CodexServer, onChunk: OnChunkHandler
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of NodeDownloadMsgType.INIT:
|
||||
let res = (await init(codex, self.cid, self.chunkSize, self.local))
|
||||
if res.isErr:
|
||||
error "Failed to INIT.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeDownloadMsgType.CHUNK:
|
||||
let res = (await chunk(codex, self.cid, onChunk))
|
||||
if res.isErr:
|
||||
error "Failed to CHUNK.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeDownloadMsgType.STREAM:
|
||||
let res = (
|
||||
await stream(codex, self.cid, self.chunkSize, self.local, self.filepath, onChunk)
|
||||
)
|
||||
if res.isErr:
|
||||
error "Failed to STREAM.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeDownloadMsgType.CANCEL:
|
||||
let res = (await cancel(codex, self.cid))
|
||||
if res.isErr:
|
||||
error "Failed to CANCEL.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeDownloadMsgType.MANIFEST:
|
||||
let res = (await manifest(codex, self.cid))
|
||||
if res.isErr:
|
||||
error "Failed to MANIFEST.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
76
library/codex_thread_requests/requests/node_info_request.nim
Normal file
76
library/codex_thread_requests/requests/node_info_request.nim
Normal file
@ -0,0 +1,76 @@
|
||||
## This file contains the node info request type that will be handled.
|
||||
|
||||
import std/[options]
|
||||
import chronos
|
||||
import chronicles
|
||||
import confutils
|
||||
import codexdht/discv5/spr
|
||||
import ../../../codex/conf
|
||||
import ../../../codex/rest/json
|
||||
import ../../../codex/node
|
||||
|
||||
from ../../../codex/codex import CodexServer, config, node
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexlibinfo"
|
||||
|
||||
type NodeInfoMsgType* = enum
|
||||
REPO
|
||||
SPR
|
||||
PEERID
|
||||
|
||||
type NodeInfoRequest* = object
|
||||
operation: NodeInfoMsgType
|
||||
|
||||
proc createShared*(T: type NodeInfoRequest, op: NodeInfoMsgType): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeInfoRequest) =
|
||||
deallocShared(self)
|
||||
|
||||
proc getRepo(
|
||||
codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
return ok($(codex[].config.dataDir))
|
||||
|
||||
proc getSpr(
|
||||
codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let spr = codex[].node.discovery.dhtRecord
|
||||
if spr.isNone:
|
||||
return err("Failed to get SPR: no SPR record found.")
|
||||
|
||||
return ok(spr.get.toURI)
|
||||
|
||||
proc getPeerId(
|
||||
codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
return ok($codex[].node.switch.peerInfo.peerId)
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeInfoRequest, codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of REPO:
|
||||
let res = (await getRepo(codex))
|
||||
if res.isErr:
|
||||
error "Failed to get REPO.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of SPR:
|
||||
let res = (await getSpr(codex))
|
||||
if res.isErr:
|
||||
error "Failed to get SPR.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of PEERID:
|
||||
let res = (await getPeerId(codex))
|
||||
if res.isErr:
|
||||
error "Failed to get PEERID.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
188
library/codex_thread_requests/requests/node_lifecycle_request.nim
Normal file
188
library/codex_thread_requests/requests/node_lifecycle_request.nim
Normal file
@ -0,0 +1,188 @@
|
||||
## This file contains the lifecycle request type that will be handled.
|
||||
## CREATE_NODE: create a new Codex node with the provided config.json.
|
||||
## START_NODE: start the provided Codex node.
|
||||
## STOP_NODE: stop the provided Codex node.
## CLOSE_NODE: close the provided Codex node.
|
||||
|
||||
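# Illustrative sketch (kept under `when false` so it is not compiled): a
# hypothetical config.json payload for CREATE_NODE and how it is wrapped into
# a request. The keys and values below are examples only; an empty string
# falls back to the defaults.
when false:
  const exampleConfigJson = """{
    "data-dir": "/tmp/codex-data",
    "log-level": "INFO"
  }"""
  let req = NodeLifecycleRequest.createShared(
    NodeLifecycleMsgType.CREATE_NODE, exampleConfigJson.cstring
  )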
import std/[options, json, strutils, net, os]
|
||||
import codexdht/discv5/spr
|
||||
import stew/shims/parseutils
|
||||
import contractabi/address
|
||||
import chronos
|
||||
import chronicles
|
||||
import results
|
||||
import confutils
|
||||
import confutils/std/net
|
||||
import confutils/defs
|
||||
import libp2p
|
||||
import json_serialization
|
||||
import json_serialization/std/[options, net]
|
||||
import ../../alloc
|
||||
import ../../../codex/conf
|
||||
import ../../../codex/utils
|
||||
import ../../../codex/utils/[keyutils, fileutils]
|
||||
import ../../../codex/units
|
||||
|
||||
from ../../../codex/codex import CodexServer, new, start, stop, close
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexliblifecycle"
|
||||
|
||||
type NodeLifecycleMsgType* = enum
|
||||
CREATE_NODE
|
||||
START_NODE
|
||||
STOP_NODE
|
||||
CLOSE_NODE
|
||||
|
||||
proc readValue*[T: InputFile | InputDir | OutPath | OutDir | OutFile](
|
||||
r: var JsonReader, val: var T
|
||||
) =
|
||||
val = T(r.readValue(string))
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var MultiAddress) =
|
||||
val = MultiAddress.init(r.readValue(string)).get()
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var NatConfig) =
|
||||
let res = NatConfig.parse(r.readValue(string))
|
||||
if res.isErr:
|
||||
raise
|
||||
newException(SerializationError, "Cannot parse the NAT config: " & res.error())
|
||||
val = res.get()
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var SignedPeerRecord) =
|
||||
let res = SignedPeerRecord.parse(r.readValue(string))
|
||||
if res.isErr:
|
||||
raise
|
||||
newException(SerializationError, "Cannot parse the signed peer: " & res.error())
|
||||
val = res.get()
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var ThreadCount) =
|
||||
val = ThreadCount(r.readValue(int))
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var NBytes) =
|
||||
val = NBytes(r.readValue(int))
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var Duration) =
|
||||
var dur: Duration
|
||||
let input = r.readValue(string)
|
||||
let count = parseDuration(input, dur)
|
||||
if count == 0:
|
||||
raise newException(SerializationError, "Cannot parse the duration: " & input)
|
||||
val = dur
|
||||
|
||||
proc readValue*(r: var JsonReader, val: var EthAddress) =
|
||||
val = EthAddress.init(r.readValue(string)).get()
|
||||
|
||||
type NodeLifecycleRequest* = object
|
||||
operation: NodeLifecycleMsgType
|
||||
configJson: cstring
|
||||
|
||||
proc createShared*(
|
||||
T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configJson: cstring = ""
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
ret[].configJson = configJson.alloc()
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeLifecycleRequest) =
|
||||
deallocShared(self[].configJson)
|
||||
deallocShared(self)
|
||||
|
||||
proc createCodex(
|
||||
configJson: cstring
|
||||
): Future[Result[CodexServer, string]] {.async: (raises: []).} =
|
||||
var conf: CodexConf
|
||||
|
||||
try:
|
||||
conf = CodexConf.load(
|
||||
version = codexFullVersion,
|
||||
envVarsPrefix = "codex",
|
||||
cmdLine = @[],
|
||||
secondarySources = proc(
|
||||
config: CodexConf, sources: auto
|
||||
) {.gcsafe, raises: [ConfigurationError].} =
|
||||
if configJson.len > 0:
|
||||
sources.addConfigFileContent(Json, $(configJson))
|
||||
,
|
||||
)
|
||||
except ConfigurationError as e:
|
||||
return err("Failed to create codex: unable to load configuration: " & e.msg)
|
||||
|
||||
conf.setupLogging()
|
||||
|
||||
try:
|
||||
{.gcsafe.}:
|
||||
updateLogLevel(conf.logLevel)
|
||||
except ValueError as err:
|
||||
return err("Failed to create codex: invalid value for log level: " & err.msg)
|
||||
|
||||
conf.setupMetrics()
|
||||
|
||||
if not (checkAndCreateDataDir((conf.dataDir).string)):
|
||||
# We are unable to access/create data folder or data folder's
|
||||
# permissions are insecure.
|
||||
return err(
|
||||
"Failed to create codex: unable to access/create data folder or data folder's permissions are insecure."
|
||||
)
|
||||
|
||||
if not (checkAndCreateDataDir((conf.dataDir / "repo"))):
|
||||
# We are unable to access/create data folder or data folder's
|
||||
# permissions are insecure.
|
||||
return err(
|
||||
"Failed to create codex: unable to access/create data folder or data folder's permissions are insecure."
|
||||
)
|
||||
|
||||
let keyPath =
|
||||
if isAbsolute(conf.netPrivKeyFile):
|
||||
conf.netPrivKeyFile
|
||||
else:
|
||||
conf.dataDir / conf.netPrivKeyFile
|
||||
let privateKey = setupKey(keyPath)
|
||||
if privateKey.isErr:
|
||||
return err("Failed to create codex: unable to get the private key.")
|
||||
let pk = privateKey.get()
|
||||
|
||||
conf.apiBindAddress = string.none
|
||||
|
||||
let server =
|
||||
try:
|
||||
CodexServer.new(conf, pk)
|
||||
except Exception as exc:
|
||||
return err("Failed to create codex: " & exc.msg)
|
||||
|
||||
return ok(server)
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeLifecycleRequest, codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of CREATE_NODE:
|
||||
codex[] = (
|
||||
await createCodex(
|
||||
self.configJson # , self.appCallbacks
|
||||
)
|
||||
).valueOr:
|
||||
error "Failed to CREATE_NODE.", error = error
|
||||
return err($error)
|
||||
of START_NODE:
|
||||
try:
|
||||
await codex[].start()
|
||||
except Exception as e:
|
||||
error "Failed to START_NODE.", error = e.msg
|
||||
return err(e.msg)
|
||||
of STOP_NODE:
|
||||
try:
|
||||
await codex[].stop()
|
||||
except Exception as e:
|
||||
error "Failed to STOP_NODE.", error = e.msg
|
||||
return err(e.msg)
|
||||
of CLOSE_NODE:
|
||||
try:
|
||||
await codex[].close()
|
||||
except Exception as e:
|
||||
error "Failed to STOP_NODE.", error = e.msg
|
||||
return err(e.msg)
|
||||
return ok("")
|
||||
95
library/codex_thread_requests/requests/node_p2p_request.nim
Normal file
95
library/codex_thread_requests/requests/node_p2p_request.nim
Normal file
@ -0,0 +1,95 @@
|
||||
{.push raises: [].}
|
||||
|
||||
## This file contains the P2P request type that will be handled.
|
||||
## CONNECT: connect to a peer with the provided peer ID and optional addresses.
|
||||
|
||||
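# Illustrative sketch (kept under `when false` so it is not compiled): building
# and processing a CONNECT request. The peer id and multiaddress below are
# hypothetical placeholders.
when false:
  proc connectExample(codex: ptr CodexServer) {.async.} =
    let req = NodeP2PRequest.createShared(
      NodeP2PMsgType.CONNECT,
      peerId = "16Uiu2HAm...".cstring,
      peerAddresses = @["/ip4/127.0.0.1/tcp/8070".cstring],
    )
    discard await req.process(codex)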
import std/[options]
|
||||
import chronos
|
||||
import chronicles
|
||||
import libp2p
|
||||
import ../../alloc
|
||||
import ../../../codex/node
|
||||
|
||||
from ../../../codex/codex import CodexServer, node
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexlibp2p"
|
||||
|
||||
type NodeP2PMsgType* = enum
|
||||
CONNECT
|
||||
|
||||
type NodeP2PRequest* = object
|
||||
operation: NodeP2PMsgType
|
||||
peerId: cstring
|
||||
peerAddresses: seq[cstring]
|
||||
|
||||
proc createShared*(
|
||||
T: type NodeP2PRequest,
|
||||
op: NodeP2PMsgType,
|
||||
peerId: cstring = "",
|
||||
peerAddresses: seq[cstring] = @[],
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
ret[].peerId = peerId.alloc()
|
||||
ret[].peerAddresses = peerAddresses
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeP2PRequest) =
|
||||
deallocShared(self[].peerId)
|
||||
deallocShared(self)
|
||||
|
||||
proc connect(
|
||||
codex: ptr CodexServer, peerId: cstring, peerAddresses: seq[cstring] = @[]
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let node = codex[].node
|
||||
let res = PeerId.init($peerId)
|
||||
if res.isErr:
|
||||
return err("Failed to connect to peer: invalid peer ID: " & $res.error())
|
||||
|
||||
let id = res.get()
|
||||
|
||||
let addresses =
|
||||
if peerAddresses.len > 0:
|
||||
var addrs: seq[MultiAddress]
|
||||
for addrStr in peerAddresses:
|
||||
let res = MultiAddress.init($addrStr)
|
||||
if res.isOk:
|
||||
addrs.add(res[])
|
||||
else:
|
||||
return err("Failed to connect to peer: invalid address: " & $addrStr)
|
||||
addrs
|
||||
else:
|
||||
try:
|
||||
let peerRecord = await node.findPeer(id)
|
||||
if peerRecord.isNone:
|
||||
return err("Failed to connect to peer: peer not found.")
|
||||
|
||||
peerRecord.get().addresses.mapIt(it.address)
|
||||
except CancelledError:
|
||||
return err("Failed to connect to peer: operation cancelled.")
|
||||
except CatchableError as e:
|
||||
return err("Failed to connect to peer: " & $e.msg)
|
||||
|
||||
try:
|
||||
await node.connect(id, addresses)
|
||||
except CancelledError:
|
||||
return err("Failed to connect to peer: operation cancelled.")
|
||||
except CatchableError as e:
|
||||
return err("Failed to connect to peer: " & $e.msg)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeP2PRequest, codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of NodeP2PMsgType.CONNECT:
|
||||
let res = (await connect(codex, self.peerId, self.peerAddresses))
|
||||
if res.isErr:
|
||||
error "Failed to CONNECT.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
180
library/codex_thread_requests/requests/node_storage_request.nim
Normal file
180
library/codex_thread_requests/requests/node_storage_request.nim
Normal file
@ -0,0 +1,180 @@
|
||||
{.push raises: [].}
|
||||
|
||||
## This file contains the node storage request.
|
||||
## 5 operations are available:
|
||||
## - LIST: list all manifests stored in the node.
|
||||
## - DELETE: Deletes either a single block or an entire dataset from the local node.
|
||||
## - FETCH: download a file from the network to the local node.
|
||||
## - SPACE: get the amount of space used by the local node.
|
||||
## - EXISTS: check the existence of a cid in a node (local store).
|
||||
|
||||
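# Illustrative sketch (kept under `when false` so it is not compiled): issuing
# one of the operations listed above, here SPACE, and reading back the
# serialized StorageSpace JSON. `codex` is assumed to be a valid
# ptr CodexServer owned by the worker thread.
when false:
  proc querySpace(codex: ptr CodexServer): Future[string] {.async.} =
    let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE)
    let res = await req.process(codex)
    return res.get() # JSON with totalBlocks and quota fields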
import std/[options]
|
||||
import chronos
|
||||
import chronicles
|
||||
import libp2p/stream/[lpstream]
|
||||
import serde/json as serde
|
||||
import ../../alloc
|
||||
import ../../../codex/units
|
||||
import ../../../codex/manifest
|
||||
import ../../../codex/stores/repostore
|
||||
|
||||
from ../../../codex/codex import CodexServer, node, repoStore
|
||||
from ../../../codex/node import
|
||||
iterateManifests, fetchManifest, fetchDatasetAsyncTask, delete, hasLocalBlock
|
||||
from libp2p import Cid, init, `$`
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexlibstorage"
|
||||
|
||||
type NodeStorageMsgType* = enum
|
||||
LIST
|
||||
DELETE
|
||||
FETCH
|
||||
SPACE
|
||||
EXISTS
|
||||
|
||||
type NodeStorageRequest* = object
|
||||
operation: NodeStorageMsgType
|
||||
cid: cstring
|
||||
|
||||
type StorageSpace = object
|
||||
totalBlocks* {.serialize.}: Natural
|
||||
quotaMaxBytes* {.serialize.}: NBytes
|
||||
quotaUsedBytes* {.serialize.}: NBytes
|
||||
quotaReservedBytes* {.serialize.}: NBytes
|
||||
|
||||
proc createShared*(
|
||||
T: type NodeStorageRequest, op: NodeStorageMsgType, cid: cstring = ""
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
ret[].cid = cid.alloc()
|
||||
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeStorageRequest) =
|
||||
deallocShared(self[].cid)
|
||||
deallocShared(self)
|
||||
|
||||
type ManifestWithCid = object
|
||||
cid {.serialize.}: string
|
||||
manifest {.serialize.}: Manifest
|
||||
|
||||
proc list(
|
||||
codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
var manifests = newSeq[ManifestWithCid]()
|
||||
proc onManifest(cid: Cid, manifest: Manifest) {.raises: [], gcsafe.} =
|
||||
manifests.add(ManifestWithCid(cid: $cid, manifest: manifest))
|
||||
|
||||
try:
|
||||
let node = codex[].node
|
||||
await node.iterateManifests(onManifest)
|
||||
except CancelledError:
|
||||
return err("Failed to list manifests: cancelled operation.")
|
||||
except CatchableError as err:
|
||||
return err("Failed to list manifest: : " & err.msg)
|
||||
|
||||
return ok(serde.toJson(manifests))
|
||||
|
||||
proc delete(
|
||||
codex: ptr CodexServer, cCid: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to delete the data: cannot parse cid: " & $cCid)
|
||||
|
||||
let node = codex[].node
|
||||
try:
|
||||
let res = await node.delete(cid.get())
|
||||
if res.isErr:
|
||||
return err("Failed to delete the data: " & res.error.msg)
|
||||
except CancelledError:
|
||||
return err("Failed to delete the data: cancelled operation.")
|
||||
except CatchableError as err:
|
||||
return err("Failed to delete the data: " & err.msg)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc fetch(
|
||||
codex: ptr CodexServer, cCid: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to fetch the data: cannot parse cid: " & $cCid)
|
||||
|
||||
try:
|
||||
let node = codex[].node
|
||||
let manifest = await node.fetchManifest(cid.get())
|
||||
if manifest.isErr:
|
||||
return err("Failed to fetch the data: " & manifest.error.msg)
|
||||
|
||||
node.fetchDatasetAsyncTask(manifest.get())
|
||||
|
||||
return ok(serde.toJson(manifest.get()))
|
||||
except CancelledError:
|
||||
return err("Failed to fetch the data: download cancelled.")
|
||||
|
||||
proc space(
|
||||
codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let repoStore = codex[].repoStore
|
||||
let space = StorageSpace(
|
||||
totalBlocks: repoStore.totalBlocks,
|
||||
quotaMaxBytes: repoStore.quotaMaxBytes,
|
||||
quotaUsedBytes: repoStore.quotaUsedBytes,
|
||||
quotaReservedBytes: repoStore.quotaReservedBytes,
|
||||
)
|
||||
return ok(serde.toJson(space))
|
||||
|
||||
proc exists(
|
||||
codex: ptr CodexServer, cCid: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
let cid = Cid.init($cCid)
|
||||
if cid.isErr:
|
||||
return err("Failed to check the data existence: cannot parse cid: " & $cCid)
|
||||
|
||||
try:
|
||||
let node = codex[].node
|
||||
let exists = await node.hasLocalBlock(cid.get())
|
||||
return ok($exists)
|
||||
except CancelledError:
|
||||
return err("Failed to check the data existence: operation cancelled.")
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeStorageRequest, codex: ptr CodexServer
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of NodeStorageMsgType.LIST:
|
||||
let res = (await list(codex))
|
||||
if res.isErr:
|
||||
error "Failed to LIST.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeStorageMsgType.DELETE:
|
||||
let res = (await delete(codex, self.cid))
|
||||
if res.isErr:
|
||||
error "Failed to DELETE.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeStorageMsgType.FETCH:
|
||||
let res = (await fetch(codex, self.cid))
|
||||
if res.isErr:
|
||||
error "Failed to FETCH.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeStorageMsgType.SPACE:
|
||||
let res = (await space(codex))
|
||||
if res.isErr:
|
||||
error "Failed to SPACE.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeStorageMsgType.EXISTS:
|
||||
let res = (await exists(codex, self.cid))
|
||||
if res.isErr:
|
||||
error "Failed to EXISTS.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
372
library/codex_thread_requests/requests/node_upload_request.nim
Normal file
372
library/codex_thread_requests/requests/node_upload_request.nim
Normal file
@ -0,0 +1,372 @@
|
||||
{.push raises: [].}
|
||||
|
||||
## This file contains the upload request.
|
||||
## A session is created for each upload allowing to resume,
|
||||
## pause and cancel uploads (using chunks).
|
||||
##
|
||||
## There are two ways to upload a file:
|
||||
## 1. Via chunks: the filepath parameter is the data filename. Steps are:
|
||||
## - INIT: creates a new upload session and returns its ID.
|
||||
## - CHUNK: sends a chunk of data to the upload session.
|
||||
## - FINALIZE: finalizes the upload and returns the CID of the uploaded file.
|
||||
## - CANCEL: cancels the upload session.
|
||||
##
|
||||
## 2. Directly from a file path: the filepath has to be absolute.
|
||||
## - INIT: creates a new upload session and returns its ID
|
||||
## - FILE: starts the upload and returns the CID of the uploaded file
|
||||
## - CANCEL: cancels the upload session.
|
||||
|
||||
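# Illustrative sketch (kept under `when false` so it is not compiled): the
# chunked flow described above, as driven by the worker that owns these
# sessions. The procs referenced here are defined further down in this file
# and the data is hypothetical.
when false:
  proc uploadExample(codex: ptr CodexServer): Future[string] {.async.} =
    let sid = (await init(codex, filepath = "photo.jpg", chunkSize = 0)).get()
    for piece in [@[byte 1, 2, 3], @[byte 4, 5, 6]]:
      discard await chunk(codex, sid.cstring, piece)
    let res = await finalize(codex, sid.cstring) # ok(cid) on success
    return res.get()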
import std/[options, os, mimetypes]
|
||||
import chronos
|
||||
import chronicles
|
||||
import questionable
|
||||
import questionable/results
|
||||
import faststreams/inputs
|
||||
import libp2p/stream/[bufferstream, lpstream]
|
||||
import ../../alloc
|
||||
import ../../../codex/units
|
||||
import ../../../codex/codextypes
|
||||
|
||||
from ../../../codex/codex import CodexServer, node
|
||||
from ../../../codex/node import store
|
||||
from libp2p import Cid, `$`
|
||||
|
||||
logScope:
|
||||
topics = "codexlib codexlibupload"
|
||||
|
||||
type NodeUploadMsgType* = enum
|
||||
INIT
|
||||
CHUNK
|
||||
FINALIZE
|
||||
CANCEL
|
||||
FILE
|
||||
|
||||
type OnProgressHandler = proc(bytes: int): void {.gcsafe, raises: [].}
|
||||
|
||||
type NodeUploadRequest* = object
|
||||
operation: NodeUploadMsgType
|
||||
sessionId: cstring
|
||||
filepath: cstring
|
||||
chunk: seq[byte]
|
||||
chunkSize: csize_t
|
||||
|
||||
type
|
||||
UploadSessionId* = string
|
||||
UploadSessionCount* = int
|
||||
UploadSession* = object
|
||||
stream: BufferStream
|
||||
fut: Future[?!Cid]
|
||||
filepath: string
|
||||
chunkSize: int
|
||||
onProgress: OnProgressHandler
|
||||
|
||||
var uploadSessions {.threadvar.}: Table[UploadSessionId, UploadSession]
|
||||
var nextUploadSessionCount {.threadvar.}: UploadSessionCount
|
||||
|
||||
proc createShared*(
|
||||
T: type NodeUploadRequest,
|
||||
op: NodeUploadMsgType,
|
||||
sessionId: cstring = "",
|
||||
filepath: cstring = "",
|
||||
chunk: seq[byte] = @[],
|
||||
chunkSize: csize_t = 0,
|
||||
): ptr type T =
|
||||
var ret = createShared(T)
|
||||
ret[].operation = op
|
||||
ret[].sessionId = sessionId.alloc()
|
||||
ret[].filepath = filepath.alloc()
|
||||
ret[].chunk = chunk
|
||||
ret[].chunkSize = chunkSize
|
||||
|
||||
return ret
|
||||
|
||||
proc destroyShared(self: ptr NodeUploadRequest) =
|
||||
deallocShared(self[].filepath)
|
||||
deallocShared(self[].sessionId)
|
||||
deallocShared(self)
|
||||
|
||||
proc init(
|
||||
codex: ptr CodexServer, filepath: cstring = "", chunkSize: csize_t = 0
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Initialize a new upload session and return its ID.
|
||||
## The session contains the future corresponding to the
|
||||
## `node.store` call.
|
||||
## The filepath can be:
|
||||
## - the filename when uploading via chunks
|
||||
## - the absolute path to a file when uploading directly.
|
||||
## The mimetype is deduced from the filename extension.
|
||||
##
|
||||
## The chunkSize matches by default the block size used to store the file.
|
||||
##
|
||||
## A callback `onBlockStored` is provided to `node.store` to
|
||||
## report the progress of the upload. This callback will check
|
||||
## that an `onProgress` handler is set in the session
|
||||
## and call it with the number of bytes stored each time a block
|
||||
## is stored.
|
||||
|
||||
var filenameOpt, mimetypeOpt = string.none
|
||||
|
||||
if isAbsolute($filepath):
|
||||
if not fileExists($filepath):
|
||||
return err(
|
||||
"Failed to create an upload session, the filepath does not exist: " & $filepath
|
||||
)
|
||||
|
||||
if filepath != "":
|
||||
let (_, name, ext) = splitFile($filepath)
|
||||
|
||||
filenameOpt = (name & ext).some
|
||||
|
||||
if ext != "":
|
||||
let extNoDot =
|
||||
if ext.len > 0:
|
||||
ext[1 ..^ 1]
|
||||
else:
|
||||
""
|
||||
let mime = newMimetypes()
|
||||
let mimetypeStr = mime.getMimetype(extNoDot, "")
|
||||
|
||||
mimetypeOpt = if mimetypeStr == "": string.none else: mimetypeStr.some
|
||||
|
||||
let sessionId = $nextUploadSessionCount
|
||||
nextUploadSessionCount.inc()
|
||||
|
||||
let stream = BufferStream.new()
|
||||
let lpStream = LPStream(stream)
|
||||
let node = codex[].node
|
||||
|
||||
let onBlockStored = proc(chunk: seq[byte]): void {.gcsafe, raises: [].} =
|
||||
try:
|
||||
if uploadSessions.contains($sessionId):
|
||||
let session = uploadSessions[$sessionId]
|
||||
if session.onProgress != nil:
|
||||
session.onProgress(chunk.len)
|
||||
except KeyError:
|
||||
error "Failed to push progress update, session is not found: ",
|
||||
sessionId = $sessionId
|
||||
|
||||
let blockSize =
|
||||
if chunkSize.NBytes > 0.NBytes: chunkSize.NBytes else: DefaultBlockSize
|
||||
let fut = node.store(lpStream, filenameOpt, mimetypeOpt, blockSize, onBlockStored)
|
||||
|
||||
uploadSessions[sessionId] = UploadSession(
|
||||
stream: stream, fut: fut, filepath: $filepath, chunkSize: blockSize.int
|
||||
)
|
||||
|
||||
return ok(sessionId)
|
||||
|
||||
proc chunk(
|
||||
codex: ptr CodexServer, sessionId: cstring, chunk: seq[byte]
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Upload a chunk of data to the session identified by sessionId.
|
||||
## The chunk is pushed to the BufferStream of the session.
|
||||
## If the chunk size is equal to or greater than the session chunkSize,
## the `onProgress` callback is temporarily set to receive the progress
## from the `onBlockStored` callback. This provides a way to report progress
## precisely when a block is stored.
## If the chunk size is smaller than the session chunkSize,
## the `onProgress` callback is not set, because the LPStream will
## wait until enough data is received to form a block before storing it.
## The wrapper may then report the progress itself, since the data is in
## the stream even though it is not yet stored.
|
||||
|
||||
if not uploadSessions.contains($sessionId):
|
||||
return err("Failed to upload the chunk, the session is not found: " & $sessionId)
|
||||
|
||||
var fut = newFuture[void]()
|
||||
|
||||
try:
|
||||
let session = uploadSessions[$sessionId]
|
||||
|
||||
if chunk.len >= session.chunkSize:
|
||||
uploadSessions[$sessionId].onProgress = proc(
|
||||
bytes: int
|
||||
): void {.gcsafe, raises: [].} =
|
||||
fut.complete()
|
||||
await session.stream.pushData(chunk)
|
||||
else:
|
||||
fut = session.stream.pushData(chunk)
|
||||
|
||||
await fut
|
||||
|
||||
uploadSessions[$sessionId].onProgress = nil
|
||||
except KeyError:
|
||||
return err("Failed to upload the chunk, the session is not found: " & $sessionId)
|
||||
except LPError as e:
|
||||
return err("Failed to upload the chunk, stream error: " & $e.msg)
|
||||
except CancelledError:
|
||||
return err("Failed to upload the chunk, operation cancelled.")
|
||||
except CatchableError as e:
|
||||
return err("Failed to upload the chunk: " & $e.msg)
|
||||
finally:
|
||||
if not fut.finished():
|
||||
fut.cancelSoon()
|
||||
|
||||
return ok("")
|
||||
|
||||
proc finalize(
|
||||
codex: ptr CodexServer, sessionId: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Finalize the upload session identified by sessionId.
|
||||
## This closes the BufferStream and waits for the `node.store` future
|
||||
## to complete. It returns the CID of the uploaded file.
|
||||
|
||||
if not uploadSessions.contains($sessionId):
|
||||
return
|
||||
err("Failed to finalize the upload session, session not found: " & $sessionId)
|
||||
|
||||
var session: UploadSession
|
||||
try:
|
||||
session = uploadSessions[$sessionId]
|
||||
await session.stream.pushEof()
|
||||
|
||||
let res = await session.fut
|
||||
if res.isErr:
|
||||
return err("Failed to finalize the upload session: " & res.error().msg)
|
||||
|
||||
return ok($res.get())
|
||||
except KeyError:
|
||||
return
|
||||
err("Failed to finalize the upload session, invalid session ID: " & $sessionId)
|
||||
except LPStreamError as e:
|
||||
return err("Failed to finalize the upload session, stream error: " & $e.msg)
|
||||
except CancelledError:
|
||||
return err("Failed to finalize the upload session, operation cancelled")
|
||||
except CatchableError as e:
|
||||
return err("Failed to finalize the upload session: " & $e.msg)
|
||||
finally:
|
||||
if uploadSessions.contains($sessionId):
|
||||
uploadSessions.del($sessionId)
|
||||
|
||||
if session.fut != nil and not session.fut.finished():
|
||||
session.fut.cancelSoon()
|
||||
|
||||
proc cancel(
|
||||
codex: ptr CodexServer, sessionId: cstring
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Cancel the upload session identified by sessionId.
|
||||
## This cancels the `node.store` future and removes the session
|
||||
## from the table.
|
||||
|
||||
if not uploadSessions.contains($sessionId):
|
||||
# Session not found, nothing to cancel
|
||||
return ok("")
|
||||
|
||||
try:
|
||||
let session = uploadSessions[$sessionId]
|
||||
session.fut.cancelSoon()
|
||||
except KeyError:
|
||||
# Session not found, nothing to cancel
|
||||
return ok("")
|
||||
|
||||
uploadSessions.del($sessionId)
|
||||
|
||||
return ok("")
|
||||
|
||||
proc streamFile(
|
||||
filepath: string, stream: BufferStream, chunkSize: int
|
||||
): Future[Result[void, string]] {.async: (raises: [CancelledError]).} =
|
||||
## Streams a file from the given filepath using faststreams.
|
||||
## fsMultiSync cannot be used with chronos because of this warning:
|
||||
## Warning: chronos backend uses nested calls to `waitFor` which
|
||||
## is not supported by chronos - it is not recommended to use it until
|
||||
## this has been resolved.
|
||||
##
|
||||
## Ideally when it is solved, we should use fsMultiSync or find a way to use async
|
||||
## file I/O with chronos, see https://github.com/status-im/nim-chronos/issues/501.
|
||||
|
||||
try:
|
||||
let inputStreamHandle = filepath.fileInput()
|
||||
let inputStream = inputStreamHandle.implicitDeref
|
||||
|
||||
var buf = newSeq[byte](chunkSize)
|
||||
while inputStream.readable:
|
||||
let read = inputStream.readIntoEx(buf)
|
||||
if read == 0:
|
||||
break
|
||||
await stream.pushData(buf[0 ..< read])
|
||||
# let byt = inputStream.read
|
||||
# await stream.pushData(@[byt])
|
||||
return ok()
|
||||
except IOError, OSError, LPStreamError:
|
||||
let e = getCurrentException()
|
||||
return err("Failed to stream the file: " & $e.msg)
|
||||
|
||||
proc file(
|
||||
codex: ptr CodexServer, sessionId: cstring, onProgress: OnProgressHandler
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
## Starts the file upload for the session identified by sessionId.
|
||||
## Will call finalize when done and return the CID of the uploaded file.
|
||||
##
|
||||
## The onProgress callback is called with the number of bytes
|
||||
## to report the progress of the upload.
|
||||
|
||||
if not uploadSessions.contains($sessionId):
|
||||
return err("Failed to upload the file, invalid session ID: " & $sessionId)
|
||||
|
||||
var session: UploadSession
|
||||
|
||||
try:
|
||||
uploadSessions[$sessionId].onProgress = onProgress
|
||||
session = uploadSessions[$sessionId]
|
||||
|
||||
let res = await streamFile(session.filepath, session.stream, session.chunkSize)
|
||||
if res.isErr:
|
||||
return err("Failed to upload the file: " & res.error)
|
||||
|
||||
return await codex.finalize(sessionId)
|
||||
except KeyError:
|
||||
return err("Failed to upload the file, the session is not found: " & $sessionId)
|
||||
except LPStreamError, IOError:
|
||||
let e = getCurrentException()
|
||||
return err("Failed to upload the file: " & $e.msg)
|
||||
except CancelledError:
|
||||
return err("Failed to upload the file, the operation is cancelled.")
|
||||
except CatchableError as e:
|
||||
return err("Failed to upload the file: " & $e.msg)
|
||||
finally:
|
||||
if uploadSessions.contains($sessionId):
|
||||
uploadSessions.del($sessionId)
|
||||
|
||||
if session.fut != nil and not session.fut.finished():
|
||||
session.fut.cancelSoon()
|
||||
|
||||
proc process*(
|
||||
self: ptr NodeUploadRequest,
|
||||
codex: ptr CodexServer,
|
||||
onUploadProgress: OnProgressHandler = nil,
|
||||
): Future[Result[string, string]] {.async: (raises: []).} =
|
||||
defer:
|
||||
destroyShared(self)
|
||||
|
||||
case self.operation
|
||||
of NodeUploadMsgType.INIT:
|
||||
let res = (await init(codex, self.filepath, self.chunkSize))
|
||||
if res.isErr:
|
||||
error "Failed to INIT.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeUploadMsgType.CHUNK:
|
||||
let res = (await chunk(codex, self.sessionId, self.chunk))
|
||||
if res.isErr:
|
||||
error "Failed to CHUNK.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeUploadMsgType.FINALIZE:
|
||||
let res = (await finalize(codex, self.sessionId))
|
||||
if res.isErr:
|
||||
error "Failed to FINALIZE.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeUploadMsgType.CANCEL:
|
||||
let res = (await cancel(codex, self.sessionId))
|
||||
if res.isErr:
|
||||
error "Failed to CANCEL.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
of NodeUploadMsgType.FILE:
|
||||
let res = (await file(codex, self.sessionId, onUploadProgress))
|
||||
if res.isErr:
|
||||
error "Failed to FILE.", error = res.error
|
||||
return err($res.error)
|
||||
return res
|
||||
14
library/events/json_base_event.nim
Normal file
14
library/events/json_base_event.nim
Normal file
@ -0,0 +1,14 @@
|
||||
# JSON Event definition
|
||||
#
|
||||
# This file defines the JsonEvent type, which serves as the base
|
||||
# for all event types in the library
|
||||
#
|
||||
# Reference specification:
|
||||
# https://github.com/vacp2p/rfc/blob/master/content/docs/rfcs/36/README.md#jsonsignal-type
|
||||
|
||||
type JsonEvent* = ref object of RootObj
|
||||
eventType* {.requiresInit.}: string
|
||||
|
||||
method `$`*(jsonEvent: JsonEvent): string {.base.} =
|
||||
discard
|
||||
# All events should implement this
|
||||
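# Illustrative sketch (kept under `when false` so it is not compiled): a
# hypothetical event type showing how concrete events are expected to extend
# JsonEvent and override `$` to produce their JSON payload.
when false:
  import std/json

  type UploadProgressEvent = ref object of JsonEvent
    bytes: int

  method `$`(event: UploadProgressEvent): string =
    $(%*{"eventType": event.eventType, "bytes": event.bytes})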
62
library/ffi_types.nim
Normal file
62
library/ffi_types.nim
Normal file
@ -0,0 +1,62 @@
|
||||
# FFI Types and Utilities
|
||||
#
|
||||
# This file defines the core types and utilities for the library's foreign
|
||||
# function interface (FFI), enabling interoperability with external code.
|
||||
|
||||
################################################################################
|
||||
### Exported types
|
||||
import results
|
||||
|
||||
type CodexCallback* = proc(
|
||||
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
|
||||
) {.cdecl, gcsafe, raises: [].}
|
||||
|
||||
const RET_OK*: cint = 0
|
||||
const RET_ERR*: cint = 1
|
||||
const RET_MISSING_CALLBACK*: cint = 2
|
||||
const RET_PROGRESS*: cint = 3
|
||||
|
||||
## Returns RET_OK as acknowledgment and calls the callback
## with the RET_OK code and the provided message.
|
||||
proc success*(callback: CodexCallback, msg: string, userData: pointer): cint =
|
||||
callback(RET_OK, cast[ptr cchar](msg), cast[csize_t](len(msg)), userData)
|
||||
|
||||
return RET_OK
|
||||
|
||||
## Returns RET_ERR as acknowledgment and calls the callback
## with the RET_ERR code and the provided message.
|
||||
proc error*(callback: CodexCallback, msg: string, userData: pointer): cint =
|
||||
let msg = "libcodex error: " & msg
|
||||
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
|
||||
|
||||
return RET_ERR
|
||||
|
||||
## Returns RET_OK as acknowledgment if the result is ok.
## If not, returns RET_ERR and calls the callback with the error message.
|
||||
proc okOrError*[T](
|
||||
callback: CodexCallback, res: Result[T, string], userData: pointer
|
||||
): cint =
|
||||
if res.isOk:
|
||||
return RET_OK
|
||||
|
||||
return callback.error($res.error, userData)
|
||||
|
||||
### End of exported types
|
||||
################################################################################
|
||||
|
||||
################################################################################
|
||||
### FFI utils
|
||||
|
||||
template foreignThreadGc*(body: untyped) =
|
||||
when declared(setupForeignThreadGc):
|
||||
setupForeignThreadGc()
|
||||
|
||||
body
|
||||
|
||||
when declared(tearDownForeignThreadGc):
|
||||
tearDownForeignThreadGc()
|
||||
|
||||
type onDone* = proc()
|
||||
|
||||
### End of FFI utils
|
||||
################################################################################
|
||||
206
library/libcodex.h
Normal file
206
library/libcodex.h
Normal file
@ -0,0 +1,206 @@
|
||||
/**
|
||||
* libcodex.h - C interface for the Codex library
|
||||
*
|
||||
* This header provides the public API for libcodex
|
||||
*
|
||||
* To see the auto-generated header by Nim, run `make libcodex` from the
|
||||
* repository root. The generated file will be created at:
|
||||
* nimcache/release/libcodex/libcodex.h
|
||||
*/
|
||||
|
||||
#ifndef __libcodex__
|
||||
#define __libcodex__
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// The possible return values for the functions that return int
|
||||
#define RET_OK 0
|
||||
#define RET_ERR 1
|
||||
#define RET_MISSING_CALLBACK 2
|
||||
#define RET_PROGRESS 3
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef void (*CodexCallback) (int callerRet, const char* msg, size_t len, void* userData);
|
||||
|
||||
void* codex_new(
|
||||
const char* configJson,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_version(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_revision(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_repo(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_debug(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_spr(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_peer_id(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_log_level(
|
||||
void* ctx,
|
||||
const char* logLevel,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_connect(
|
||||
void* ctx,
|
||||
const char* peerId,
|
||||
const char** peerAddresses,
|
||||
size_t peerAddressesSize,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_peer_debug(
|
||||
void* ctx,
|
||||
const char* peerId,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
|
||||
int codex_upload_init(
|
||||
void* ctx,
|
||||
const char* filepath,
|
||||
size_t chunkSize,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_upload_chunk(
|
||||
void* ctx,
|
||||
const char* sessionId,
|
||||
const uint8_t* chunk,
|
||||
size_t len,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_upload_finalize(
|
||||
void* ctx,
|
||||
const char* sessionId,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_upload_cancel(
|
||||
void* ctx,
|
||||
const char* sessionId,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_upload_file(
|
||||
void* ctx,
|
||||
const char* sessionId,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_download_stream(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
size_t chunkSize,
|
||||
bool local,
|
||||
const char* filepath,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_download_init(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
size_t chunkSize,
|
||||
bool local,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_download_chunk(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_download_cancel(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_download_manifest(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_storage_list(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_storage_space(
|
||||
void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_storage_delete(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_storage_fetch(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_storage_exists(
|
||||
void* ctx,
|
||||
const char* cid,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_start(void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_stop(void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
int codex_close(void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
// Destroys an instance of a codex node created with codex_new
|
||||
int codex_destroy(void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
void codex_set_event_callback(void* ctx,
|
||||
CodexCallback callback,
|
||||
void* userData);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __libcodex__ */
|
||||
565
library/libcodex.nim
Normal file
565
library/libcodex.nim
Normal file
@ -0,0 +1,565 @@
|
||||
# libcodex.nim - C-exported interface for the Codex shared library
|
||||
#
|
||||
# This file implements the public C API for libcodex.
|
||||
# It acts as the bridge between C programs and the internal Nim implementation.
|
||||
#
|
||||
# This file defines:
|
||||
# - Initialization logic for the Nim runtime (once per process)
|
||||
# - Thread-safe exported procs callable from C
|
||||
# - Callback registration and invocation for asynchronous communication
|
||||
|
||||
# cdecl is the C declaration calling convention.
# It's the standard way C compilers expect functions to behave:
# 1- The caller cleans up the stack after the call
# 2- Symbol names are exported in a predictable way
# In other terms, it is the glue that makes Nim functions callable as normal C functions.
|
||||
{.pragma: exported, exportc, cdecl, raises: [].}
|
||||
{.pragma: callback, cdecl, raises: [], gcsafe.}
|
||||
|
||||
# Ensure code is position-independent so it can be built into a shared library (.so).
|
||||
# In other terms, code that can run no matter where it is placed in memory.
|
||||
{.passc: "-fPIC".}
|
||||
|
||||
when defined(linux):
|
||||
# Define the canonical name for this library
|
||||
{.passl: "-Wl,-soname,libcodex.so".}
|
||||
|
||||
import std/[atomics]
|
||||
import chronicles
|
||||
import chronos
|
||||
import chronos/threadsync
|
||||
import ./codex_context
|
||||
import ./codex_thread_requests/codex_thread_request
|
||||
import ./codex_thread_requests/requests/node_lifecycle_request
|
||||
import ./codex_thread_requests/requests/node_info_request
|
||||
import ./codex_thread_requests/requests/node_debug_request
|
||||
import ./codex_thread_requests/requests/node_p2p_request
|
||||
import ./codex_thread_requests/requests/node_upload_request
|
||||
import ./codex_thread_requests/requests/node_download_request
|
||||
import ./codex_thread_requests/requests/node_storage_request
|
||||
import ./ffi_types
|
||||
|
||||
from ../codex/conf import codexVersion
|
||||
|
||||
logScope:
|
||||
topics = "codexlib"
|
||||
|
||||
template checkLibcodexParams*(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
) =
|
||||
if not isNil(ctx):
|
||||
ctx[].userData = userData
|
||||
|
||||
if isNil(callback):
|
||||
return RET_MISSING_CALLBACK
|
||||
|
||||
# From Nim doc:
|
||||
# "the C targets require you to initialize Nim's internals, which is done calling a NimMain function."
|
||||
# "The name NimMain can be influenced via the --nimMainPrefix:prefix switch."
|
||||
# "Use --nimMainPrefix:MyLib and the function to call is named MyLibNimMain."
|
||||
proc libcodexNimMain() {.importc.}
|
||||
|
||||
# Atomic flag to prevent multiple initializations
|
||||
var initialized: Atomic[bool]
|
||||
|
||||
if defined(android):
|
||||
# Redirect chronicles to Android System logs
|
||||
when compiles(defaultChroniclesStream.outputs[0].writer):
|
||||
defaultChroniclesStream.outputs[0].writer = proc(
|
||||
logLevel: LogLevel, msg: LogOutputStr
|
||||
) {.raises: [].} =
|
||||
echo logLevel, msg
|
||||
|
||||
# Initializes the Nim runtime and foreign-thread GC
|
||||
proc initializeLibrary() {.exported.} =
|
||||
if not initialized.exchange(true):
|
||||
## Every Nim library must call `<prefix>NimMain()` once
|
||||
libcodexNimMain()
|
||||
when declared(setupForeignThreadGc):
|
||||
setupForeignThreadGc()
|
||||
when declared(nimGC_setStackBottom):
|
||||
var locals {.volatile, noinit.}: pointer
|
||||
locals = addr(locals)
|
||||
nimGC_setStackBottom(locals)
|
||||
|
||||
proc codex_new(
|
||||
configJson: cstring, callback: CodexCallback, userData: pointer
|
||||
): pointer {.dynlib, exported.} =
|
||||
initializeLibrary()
|
||||
|
||||
if isNil(callback):
|
||||
error "Failed to create codex instance: the callback is missing."
|
||||
return nil
|
||||
|
||||
var ctx = codex_context.createCodexContext().valueOr:
|
||||
let msg = $error
|
||||
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
|
||||
return nil
|
||||
|
||||
ctx.userData = userData
|
||||
|
||||
let reqContent =
|
||||
NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE_NODE, configJson)
|
||||
|
||||
codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.LIFECYCLE, reqContent, callback, userData
|
||||
).isOkOr:
|
||||
let msg = $error
|
||||
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
|
||||
return nil
|
||||
|
||||
return ctx
|
||||
|
||||
proc codex_version(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
callback(
|
||||
RET_OK,
|
||||
cast[ptr cchar](conf.codexVersion),
|
||||
cast[csize_t](len(conf.codexVersion)),
|
||||
userData,
|
||||
)
|
||||
|
||||
return RET_OK
|
||||
|
||||
proc codex_revision(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
callback(
|
||||
RET_OK,
|
||||
cast[ptr cchar](conf.codexRevision),
|
||||
cast[csize_t](len(conf.codexRevision)),
|
||||
userData,
|
||||
)
|
||||
|
||||
return RET_OK
|
||||
|
||||
proc codex_repo(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
let reqContent = NodeInfoRequest.createShared(NodeInfoMsgType.REPO)
|
||||
let res = codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.INFO, reqContent, callback, userData
|
||||
)
|
||||
|
||||
return callback.okOrError(res, userData)
|
||||
|
||||
proc codex_debug(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
let reqContent = NodeDebugRequest.createShared(NodeDebugMsgType.DEBUG)
|
||||
let res = codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.DEBUG, reqContent, callback, userData
|
||||
)
|
||||
|
||||
return callback.okOrError(res, userData)
|
||||
|
||||
proc codex_spr(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
let reqContent = NodeInfoRequest.createShared(NodeInfoMsgType.SPR)
|
||||
let res = codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.INFO, reqContent, callback, userData
|
||||
)
|
||||
|
||||
return callback.okOrError(res, userData)
|
||||
|
||||
proc codex_peer_id(
|
||||
ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
let reqContent = NodeInfoRequest.createShared(NodeInfoMsgType.PEERID)
|
||||
let res = codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.INFO, reqContent, callback, userData
|
||||
)
|
||||
|
||||
return callback.okOrError(res, userData)
|
||||
|
||||
## Set the log level of the library at runtime.
|
||||
## It uses updateLogLevel which is a synchronous proc and
|
||||
## cannot be used inside an async context because of a gcsafe issue.
|
||||
proc codex_log_level(
|
||||
ctx: ptr CodexContext, logLevel: cstring, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
let reqContent =
|
||||
NodeDebugRequest.createShared(NodeDebugMsgType.LOG_LEVEL, logLevel = logLevel)
|
||||
let res = codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.DEBUG, reqContent, callback, userData
|
||||
)
|
||||
|
||||
return callback.okOrError(res, userData)
|
||||
|
||||
proc codex_connect(
|
||||
ctx: ptr CodexContext,
|
||||
peerId: cstring,
|
||||
peerAddressesPtr: ptr cstring,
|
||||
peerAddressesLength: csize_t,
|
||||
callback: CodexCallback,
|
||||
userData: pointer,
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
checkLibcodexParams(ctx, callback, userData)
|
||||
|
||||
var peerAddresses = newSeq[cstring](peerAddressesLength)
|
||||
let peers = cast[ptr UncheckedArray[cstring]](peerAddressesPtr)
|
||||
for i in 0 ..< peerAddressesLength:
|
||||
peerAddresses[i] = peers[i]
|
||||
|
||||
let reqContent = NodeP2PRequest.createShared(
|
||||
NodeP2PMsgType.CONNECT, peerId = peerId, peerAddresses = peerAddresses
|
||||
)
|
||||
let res = codex_context.sendRequestToCodexThread(
|
||||
ctx, RequestType.P2P, reqContent, callback, userData
|
||||
)
|
||||
|
||||
return callback.okOrError(res, userData)
|
||||
|
||||
proc codex_peer_debug(
|
||||
ctx: ptr CodexContext, peerId: cstring, callback: CodexCallback, userData: pointer
|
||||
): cint {.dynlib, exportc.} =
|
||||
initializeLibrary()
|
||||
  checkLibcodexParams(ctx, callback, userData)

  let reqContent = NodeDebugRequest.createShared(NodeDebugMsgType.PEER, peerId = peerId)
  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.DEBUG, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_close(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent = NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CLOSE_NODE)
  var res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.LIFECYCLE, reqContent, callback, userData
  )
  if res.isErr:
    return callback.error(res.error, userData)

  return callback.okOrError(res, userData)

proc codex_destroy(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let res = codex_context.destroyCodexContext(ctx)
  if res.isErr:
    return RET_ERR

  return RET_OK

proc codex_upload_init(
    ctx: ptr CodexContext,
    filepath: cstring,
    chunkSize: csize_t,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent = NodeUploadRequest.createShared(
    NodeUploadMsgType.INIT, filepath = filepath, chunkSize = chunkSize
  )

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.UPLOAD, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_upload_chunk(
    ctx: ptr CodexContext,
    sessionId: cstring,
    data: ptr byte,
    len: csize_t,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let chunk = newSeq[byte](len)
  copyMem(addr chunk[0], data, len)

  let reqContent = NodeUploadRequest.createShared(
    NodeUploadMsgType.CHUNK, sessionId = sessionId, chunk = chunk
  )
  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.UPLOAD, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_upload_finalize(
    ctx: ptr CodexContext,
    sessionId: cstring,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent =
    NodeUploadRequest.createShared(NodeUploadMsgType.FINALIZE, sessionId = sessionId)
  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.UPLOAD, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_upload_cancel(
    ctx: ptr CodexContext,
    sessionId: cstring,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent =
    NodeUploadRequest.createShared(NodeUploadMsgType.CANCEL, sessionId = sessionId)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.UPLOAD, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_upload_file(
    ctx: ptr CodexContext,
    sessionId: cstring,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent =
    NodeUploadRequest.createShared(NodeUploadMsgType.FILE, sessionId = sessionId)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.UPLOAD, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_download_init(
    ctx: ptr CodexContext,
    cid: cstring,
    chunkSize: csize_t,
    local: bool,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeDownloadRequest.createShared(
    NodeDownloadMsgType.INIT, cid = cid, chunkSize = chunkSize, local = local
  )

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.DOWNLOAD, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_download_chunk(
    ctx: ptr CodexContext, cid: cstring, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CHUNK, cid = cid)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.DOWNLOAD, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_download_stream(
    ctx: ptr CodexContext,
    cid: cstring,
    chunkSize: csize_t,
    local: bool,
    filepath: cstring,
    callback: CodexCallback,
    userData: pointer,
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeDownloadRequest.createShared(
    NodeDownloadMsgType.STREAM,
    cid = cid,
    chunkSize = chunkSize,
    local = local,
    filepath = filepath,
  )

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.DOWNLOAD, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_download_cancel(
    ctx: ptr CodexContext, cid: cstring, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.CANCEL, cid = cid)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.DOWNLOAD, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_download_manifest(
    ctx: ptr CodexContext, cid: cstring, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeDownloadRequest.createShared(NodeDownloadMsgType.MANIFEST, cid = cid)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.DOWNLOAD, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_storage_list(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeStorageRequest.createShared(NodeStorageMsgType.LIST)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.STORAGE, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_storage_space(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeStorageRequest.createShared(NodeStorageMsgType.SPACE)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.STORAGE, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_storage_delete(
    ctx: ptr CodexContext, cid: cstring, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeStorageRequest.createShared(NodeStorageMsgType.DELETE, cid = cid)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.STORAGE, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_storage_fetch(
    ctx: ptr CodexContext, cid: cstring, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeStorageRequest.createShared(NodeStorageMsgType.FETCH, cid = cid)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.STORAGE, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_storage_exists(
    ctx: ptr CodexContext, cid: cstring, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let req = NodeStorageRequest.createShared(NodeStorageMsgType.EXISTS, cid = cid)

  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.STORAGE, req, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_start(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent: ptr NodeLifecycleRequest =
    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE)
  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.LIFECYCLE, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_stop(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibcodexParams(ctx, callback, userData)

  let reqContent: ptr NodeLifecycleRequest =
    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE)
  let res = codex_context.sendRequestToCodexThread(
    ctx, RequestType.LIFECYCLE, reqContent, callback, userData
  )

  return callback.okOrError(res, userData)

proc codex_set_event_callback(
    ctx: ptr CodexContext, callback: CodexCallback, userData: pointer
) {.dynlib, exportc.} =
  initializeLibrary()
  ctx[].eventCallback = cast[pointer](callback)
  ctx[].eventUserData = userData
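Note for integrators: the exports above compose into a three-step chunked upload (INIT to open a session, CHUNK per buffer, FINALIZE to complete). The C sketch below illustrates that flow under stated assumptions; the CodexCallback signature, the empty filepath for purely chunked uploads, RET_OK being 0, and the INIT/FINALIZE responses carrying the session id and dataset CID are assumptions not confirmed by this diff — only the exported names and parameter lists are taken from the code above.

/* Hedged sketch of the chunked upload flow using the exports above. */
#include <stddef.h>
#include <string.h>

typedef struct CodexContext CodexContext; /* opaque; created elsewhere by the library */
/* Assumed callback shape; check the generated libcodex header. */
typedef void (*CodexCallback)(int ret, const char *msg, size_t len, void *userData);

int codex_upload_init(CodexContext *ctx, const char *filepath, size_t chunkSize,
                      CodexCallback cb, void *userData);
int codex_upload_chunk(CodexContext *ctx, const char *sessionId, unsigned char *data,
                       size_t len, CodexCallback cb, void *userData);
int codex_upload_finalize(CodexContext *ctx, const char *sessionId, CodexCallback cb,
                          void *userData);

static char lastMsg[256];

static void onResponse(int ret, const char *msg, size_t len, void *userData) {
  (void)userData; /* keep the response payload (session id or CID) for later use */
  if (ret == 0 && msg != NULL && len < sizeof(lastMsg)) {
    memcpy(lastMsg, msg, len);
    lastMsg[len] = '\0';
  }
}

/* Uploads one in-memory buffer as a single chunk; returns 0 on success. */
int upload_buffer(CodexContext *ctx, unsigned char *buf, size_t n) {
  char session[256];

  if (codex_upload_init(ctx, "", 65536, onResponse, NULL) != 0) /* "": assumed chunked mode */
    return -1;
  memcpy(session, lastMsg, sizeof(session)); /* assumed: INIT reply is the session id */

  if (codex_upload_chunk(ctx, session, buf, n, onResponse, NULL) != 0)
    return -1;

  return codex_upload_finalize(ctx, session, onResponse, NULL); /* reply: dataset CID (assumed) */
}

The download side mirrors this shape (codex_download_init, codex_download_chunk, codex_download_cancel), with codex_download_stream additionally taking a filepath, presumably to stream straight to disk.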
28
openapi.yaml
28
openapi.yaml
@ -727,6 +727,34 @@ paths:
        "500":
          description: Well it was bad-bad

  "/data/{cid}/exists":
    get:
      summary: "Check if a block identified by CID exists in the local node."
      tags: [Data]
      operationId: hasBlock
      parameters:
        - in: path
          name: cid
          required: true
          schema:
            $ref: "#/components/schemas/Cid"
          description: "CID of the block to check."
      responses:
        "200":
          description: Block existence information
          content:
            application/json:
              schema:
                type: object
                properties:
                  has:
                    type: boolean
                    description: Indicates whether the block exists in the local node
        "400":
          description: Invalid CID is specified
        "500":
          description: Well it was bad-bad

  "/space":
    get:
      summary: "Gets a summary of the storage space allocation of the node."
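For a quick manual check of the new endpoint, a plain GET is enough; the libcurl sketch below assumes the node's default REST base of http://localhost:8080/api/codex/v1 and uses a placeholder CID — substitute your node's address and a real CID.

/* Hedged example: query /data/{cid}/exists and print the JSON body. */
#include <curl/curl.h>

int main(void) {
  CURL *curl = curl_easy_init();
  if (curl == NULL)
    return 1;

  /* A 200 reply carries {"has": true|false} per the schema above; 400 means the CID is invalid. */
  curl_easy_setopt(curl, CURLOPT_URL,
                   "http://localhost:8080/api/codex/v1/data/CID_PLACEHOLDER/exists");
  CURLcode rc = curl_easy_perform(curl); /* response body goes to stdout by default */
  curl_easy_cleanup(curl);
  return rc == CURLE_OK ? 0 : 1;
}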
@ -54,7 +54,7 @@ asyncchecksuite "Block Advertising and Discovery":
    peerStore = PeerCtxStore.new()
    pendingBlocks = PendingBlocksManager.new()

    (manifest, tree) = makeManifestAndTree(blocks).tryGet()
    (_, tree, manifest) = makeDataset(blocks).tryGet()
    manifestBlock =
      bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet()

@ -172,7 +172,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          break

        blocks.add(bt.Block.new(chunk).tryGet())
      let (manifest, tree) = makeManifestAndTree(blocks).tryGet()
      let (_, tree, manifest) = makeDataset(blocks).tryGet()
      manifests.add(manifest)
      mBlocks.add(manifest.asBlock())
      trees.add(tree)

@ -216,7 +216,6 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
  test "E2E - Should advertise and discover blocks":
    # Distribute the manifests and trees amongst 1..3
    # Ask 0 to download everything without connecting him beforehand

    var advertised: Table[Cid, SignedPeerRecord]

    MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(

@ -242,6 +241,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
        )
      ],
      allowSpurious = true,
    )

    discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)

@ -252,6 +252,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
        )
      ],
      allowSpurious = true,
    )

    discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)

@ -262,6 +263,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
        )
      ],
      allowSpurious = true,
    )

    MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(

@ -311,6 +313,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
        )
      ],
      allowSpurious = true,
    )

    discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)

@ -321,6 +324,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
        )
      ],
      allowSpurious = true,
    )

    discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)

@ -331,6 +335,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
          blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
        )
      ],
      allowSpurious = true,
    )

    MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
@ -43,7 +43,7 @@ asyncchecksuite "Test Discovery Engine":

      blocks.add(bt.Block.new(chunk).tryGet())

    (manifest, tree) = makeManifestAndTree(blocks).tryGet()
    (_, tree, manifest) = makeDataset(blocks).tryGet()
    manifestBlock = manifest.asBlock()
    blocks.add(manifestBlock)

@ -29,14 +29,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
    nodeCmps1 = generateNodes(1, blocks1).components[0]
    nodeCmps2 = generateNodes(1, blocks2).components[0]

    await allFuturesThrowing(
      nodeCmps1.switch.start(),
      nodeCmps1.blockDiscovery.start(),
      nodeCmps1.engine.start(),
      nodeCmps2.switch.start(),
      nodeCmps2.blockDiscovery.start(),
      nodeCmps2.engine.start(),
    )
    await allFuturesThrowing(nodeCmps1.start(), nodeCmps2.start())

    # initialize our want lists
    pendingBlocks1 =

@ -65,14 +58,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
    check isNil(peerCtx2).not

  teardown:
    await allFuturesThrowing(
      nodeCmps1.blockDiscovery.stop(),
      nodeCmps1.engine.stop(),
      nodeCmps1.switch.stop(),
      nodeCmps2.blockDiscovery.stop(),
      nodeCmps2.engine.stop(),
      nodeCmps2.switch.stop(),
    )
    await allFuturesThrowing(nodeCmps1.stop(), nodeCmps2.stop())

  test "Should exchange blocks on connect":
    await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds)

@ -96,17 +82,11 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
  test "Should send want-have for block":
    let blk = bt.Block.new("Block 1".toBytes).tryGet()
    let blkFut = nodeCmps1.pendingBlocks.getWantHandle(blk.cid)
    peerCtx2.blockRequestScheduled(blk.address)

    (await nodeCmps2.localStore.putBlock(blk)).tryGet()

    let entry = WantListEntry(
      address: blk.address,
      priority: 1,
      cancel: false,
      wantType: WantType.WantBlock,
      sendDontHave: false,
    )

    peerCtx1.peerWants.add(entry)
    peerCtx1.wantedBlocks.incl(blk.address)
    check nodeCmps2.engine.taskQueue.pushOrUpdateNoWait(peerCtx1).isOk

    check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet()

@ -209,3 +189,38 @@ asyncchecksuite "NetworkStore - multiple nodes":

    check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3]
    check pendingBlocks2.mapIt(it.read) == blocks[12 .. 15]

asyncchecksuite "NetworkStore - dissemination":
  var nodes: seq[NodesComponents]

  teardown:
    if nodes.len > 0:
      await nodes.stop()

  test "Should disseminate blocks across large diameter swarm":
    let dataset = makeDataset(await makeRandomBlocks(60 * 256, 256'nb)).tryGet()

    nodes = generateNodes(
      6,
      config = NodeConfig(
        useRepoStore: false,
        findFreePorts: false,
        basePort: 8080,
        createFullNode: false,
        enableBootstrap: false,
        enableDiscovery: true,
      ),
    )

    await assignBlocks(nodes[0], dataset, 0 .. 9)
    await assignBlocks(nodes[1], dataset, 10 .. 19)
    await assignBlocks(nodes[2], dataset, 20 .. 29)
    await assignBlocks(nodes[3], dataset, 30 .. 39)
    await assignBlocks(nodes[4], dataset, 40 .. 49)
    await assignBlocks(nodes[5], dataset, 50 .. 59)

    await nodes.start()
    await nodes.linearTopology()

    let downloads = nodes.mapIt(downloadDataset(it, dataset))
    await allFuturesThrowing(downloads).wait(30.seconds)
@ -27,8 +27,6 @@ const NopSendWantCancellationsProc = proc(

asyncchecksuite "NetworkStore engine basic":
  var
    rng: Rng
    seckey: PrivateKey
    peerId: PeerId
    chunker: Chunker
    wallet: WalletRef

@ -39,9 +37,7 @@ asyncchecksuite "NetworkStore engine basic":
    done: Future[void]

  setup:
    rng = Rng.instance()
    seckey = PrivateKey.random(rng[]).tryGet()
    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
    peerId = PeerId.example
    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
    wallet = WalletRef.example
    blockDiscovery = Discovery.new()

@ -83,7 +79,7 @@ asyncchecksuite "NetworkStore engine basic":

    for b in blocks:
      discard engine.pendingBlocks.getWantHandle(b.cid)
    await engine.setupPeer(peerId)
    await engine.peerAddedHandler(peerId)

    await done.wait(100.millis)

@ -111,14 +107,12 @@ asyncchecksuite "NetworkStore engine basic":
    )

    engine.pricing = pricing.some
    await engine.setupPeer(peerId)
    await engine.peerAddedHandler(peerId)

    await done.wait(100.millis)

asyncchecksuite "NetworkStore engine handlers":
  var
    rng: Rng
    seckey: PrivateKey
    peerId: PeerId
    chunker: Chunker
    wallet: WalletRef

@ -134,8 +128,7 @@ asyncchecksuite "NetworkStore engine handlers":
    blocks: seq[Block]

  setup:
    rng = Rng.instance()
    chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb)
    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)

    while true:
      let chunk = await chunker.getBytes()

@ -144,8 +137,7 @@ asyncchecksuite "NetworkStore engine handlers":

      blocks.add(Block.new(chunk).tryGet())

    seckey = PrivateKey.random(rng[]).tryGet()
    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
    peerId = PeerId.example
    wallet = WalletRef.example
    blockDiscovery = Discovery.new()
    peerStore = PeerCtxStore.new()

@ -174,7 +166,7 @@ asyncchecksuite "NetworkStore engine handlers":
      let ctx = await engine.taskQueue.pop()
      check ctx.id == peerId
      # only `wantBlock` scheduled
      check ctx.peerWants.mapIt(it.address.cidOrTreeCid) == blocks.mapIt(it.cid)
      check ctx.wantedBlocks == blocks.mapIt(it.address).toHashSet

    let done = handler()
    await engine.wantListHandler(peerId, wantList)

@ -249,6 +241,9 @@ asyncchecksuite "NetworkStore engine handlers":
  test "Should store blocks in local store":
    let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))

    for blk in blocks:
      peerCtx.blockRequestScheduled(blk.address)

    let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))

    # Install NOP for want list cancellations so they don't cause a crash

@ -274,6 +269,9 @@ asyncchecksuite "NetworkStore engine handlers":
      (it.address, Presence(address: it.address, price: rand(uint16).u256, have: true))
    ).toTable

    for blk in blocks:
      peerContext.blockRequestScheduled(blk.address)

    engine.network = BlockExcNetwork(
      request: BlockExcRequest(
        sendPayment: proc(

@ -337,33 +335,44 @@ asyncchecksuite "NetworkStore engine handlers":
      check a in peerCtx.peerHave
      check peerCtx.blocks[a].price == price

  test "Should send cancellations for received blocks":
  test "Should send cancellations for requested blocks only":
    let
      pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
      blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
      cancellations = newTable(blocks.mapIt((it.address, newFuture[void]())).toSeq)
      pendingPeer = peerId # peer towards which we have pending block requests
      pendingPeerCtx = peerCtx
      senderPeer = PeerId.example # peer that will actually send the blocks
      senderPeerCtx = BlockExcPeerCtx(id: senderPeer)
      reqBlocks = @[blocks[0], blocks[4]] # blocks that we requested to pendingPeer
      reqBlockAddrs = reqBlocks.mapIt(it.address)
      blockHandles = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))

    peerCtx.blocks = blocks.mapIt(
      (it.address, Presence(address: it.address, have: true, price: UInt256.example))
    ).toTable
    var cancelled: HashSet[BlockAddress]

    engine.peers.add(senderPeerCtx)
    for address in reqBlockAddrs:
      pendingPeerCtx.blockRequestScheduled(address)

    for address in blocks.mapIt(it.address):
      senderPeerCtx.blockRequestScheduled(address)

    proc sendWantCancellations(
        id: PeerId, addresses: seq[BlockAddress]
    ) {.async: (raises: [CancelledError]).} =
      assert id == pendingPeer
      for address in addresses:
        cancellations[address].catch.expect("address should exist").complete()
        cancelled.incl(address)

    engine.network = BlockExcNetwork(
      request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
    )

    await engine.blocksDeliveryHandler(peerId, blocksDelivery)
    discard await allFinished(pending).wait(100.millis)
    await allFuturesThrowing(cancellations.values().toSeq)
    let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
    await engine.blocksDeliveryHandler(senderPeer, blocksDelivery)
    discard await allFinished(blockHandles).wait(100.millis)

    check cancelled == reqBlockAddrs.toHashSet()

asyncchecksuite "Block Download":
  var
    rng: Rng
    seckey: PrivateKey
    peerId: PeerId
    chunker: Chunker
@ -380,8 +389,7 @@ asyncchecksuite "Block Download":
    blocks: seq[Block]

  setup:
    rng = Rng.instance()
    chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb)
    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)

    while true:
      let chunk = await chunker.getBytes()

@ -390,8 +398,7 @@ asyncchecksuite "Block Download":

      blocks.add(Block.new(chunk).tryGet())

    seckey = PrivateKey.random(rng[]).tryGet()
    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
    peerId = PeerId.example
    wallet = WalletRef.example
    blockDiscovery = Discovery.new()
    peerStore = PeerCtxStore.new()

@ -409,13 +416,27 @@ asyncchecksuite "Block Download":
      localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks
    )

    peerCtx = BlockExcPeerCtx(id: peerId)
    peerCtx = BlockExcPeerCtx(id: peerId, activityTimeout: 100.milliseconds)
    engine.peers.add(peerCtx)

  test "Should exhaust retries":
  test "Should reschedule blocks on peer timeout":
    let
      slowPeer = peerId
      fastPeer = PeerId.example
      slowPeerCtx = peerCtx
      # "Fast" peer has in fact a generous timeout. This should avoid timing issues
      # in the test.
      fastPeerCtx = BlockExcPeerCtx(id: fastPeer, activityTimeout: 60.seconds)
      requestedBlock = blocks[0]

    var
      retries = 2
      address = BlockAddress.init(blocks[0].cid)
      slowPeerWantList = newFuture[void]("slowPeerWantList")
      fastPeerWantList = newFuture[void]("fastPeerWantList")
      slowPeerDropped = newFuture[void]("slowPeerDropped")
      slowPeerBlockRequest = newFuture[void]("slowPeerBlockRequest")
      fastPeerBlockRequest = newFuture[void]("fastPeerBlockRequest")

    engine.peers.add(fastPeerCtx)

    proc sendWantList(
        id: PeerId,

@ -426,68 +447,63 @@ asyncchecksuite "Block Download":
        full: bool = false,
        sendDontHave: bool = false,
    ) {.async: (raises: [CancelledError]).} =
      check wantType == WantHave
      check not engine.pendingBlocks.isInFlight(address)
      check engine.pendingBlocks.retries(address) == retries
      retries -= 1
      check addresses == @[requestedBlock.address]

    engine.pendingBlocks.blockRetries = 2
    engine.pendingBlocks.retryInterval = 10.millis
      if wantType == WantBlock:
        if id == slowPeer:
          slowPeerBlockRequest.complete()
        else:
          fastPeerBlockRequest.complete()

      if wantType == WantHave:
        if id == slowPeer:
          slowPeerWantList.complete()
        else:
          fastPeerWantList.complete()

    proc onPeerDropped(
        peer: PeerId
    ): Future[void] {.async: (raises: [CancelledError]).} =
      assert peer == slowPeer
      slowPeerDropped.complete()

    proc selectPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
      # Looks for the slow peer.
      for peer in peers:
        if peer.id == slowPeer:
          return peer

      return peers[0]

    engine.selectPeer = selectPeer
    engine.pendingBlocks.retryInterval = 200.milliseconds
    engine.network =
      BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
    engine.network.handlers.onPeerDropped = onPeerDropped

    let pending = engine.requestBlock(address)
    let blockHandle = engine.requestBlock(requestedBlock.address)

    expect RetriesExhaustedError:
      discard (await pending).tryGet()
    # Waits for the peer to send its want list to both peers.
    await slowPeerWantList.wait(5.seconds)
    await fastPeerWantList.wait(5.seconds)

  test "Should retry block request":
    var
      address = BlockAddress.init(blocks[0].cid)
      steps = newAsyncEvent()
    let blockPresence =
      @[BlockPresence(address: requestedBlock.address, type: BlockPresenceType.Have)]

    proc sendWantList(
        id: PeerId,
        addresses: seq[BlockAddress],
        priority: int32 = 0,
        cancel: bool = false,
        wantType: WantType = WantType.WantHave,
        full: bool = false,
        sendDontHave: bool = false,
    ) {.async: (raises: [CancelledError]).} =
      case wantType
      of WantHave:
        check engine.pendingBlocks.isInFlight(address) == false
        check engine.pendingBlocks.retriesExhausted(address) == false
        steps.fire()
      of WantBlock:
        check engine.pendingBlocks.isInFlight(address) == true
        check engine.pendingBlocks.retriesExhausted(address) == false
        steps.fire()

    engine.pendingBlocks.blockRetries = 10
    engine.pendingBlocks.retryInterval = 10.millis
    engine.network = BlockExcNetwork(
      request: BlockExcRequest(
        sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc
      )
    )

    let pending = engine.requestBlock(address)
    await steps.wait()

    # add blocks precense
    peerCtx.blocks = blocks.mapIt(
      (it.address, Presence(address: it.address, have: true, price: UInt256.example))
    ).toTable

    steps.clear()
    await steps.wait()
    await engine.blockPresenceHandler(slowPeer, blockPresence)
    await engine.blockPresenceHandler(fastPeer, blockPresence)
    # Waits for the peer to ask for the block.
    await slowPeerBlockRequest.wait(5.seconds)
    # Don't reply and wait for the peer to be dropped by timeout.
    await slowPeerDropped.wait(5.seconds)

    # The engine should retry and ask the fast peer for the block.
    await fastPeerBlockRequest.wait(5.seconds)
    await engine.blocksDeliveryHandler(
      peerId, @[BlockDelivery(blk: blocks[0], address: address)]
      fastPeer, @[BlockDelivery(blk: requestedBlock, address: requestedBlock.address)]
    )
    check (await pending).tryGet() == blocks[0]

    discard await blockHandle.wait(5.seconds)

  test "Should cancel block request":
    var
@ -522,8 +538,6 @@

asyncchecksuite "Task Handler":
  var
    rng: Rng
    seckey: PrivateKey
    peerId: PeerId
    chunker: Chunker
    wallet: WalletRef

@ -541,8 +555,7 @@ asyncchecksuite "Task Handler":
    blocks: seq[Block]

  setup:
    rng = Rng.instance()
    chunker = RandomChunker.new(rng, size = 1024, chunkSize = 256'nb)
    chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256'nb)
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len <= 0:

@ -550,8 +563,7 @@ asyncchecksuite "Task Handler":

      blocks.add(Block.new(chunk).tryGet())

    seckey = PrivateKey.random(rng[]).tryGet()
    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
    peerId = PeerId.example
    wallet = WalletRef.example
    blockDiscovery = Discovery.new()
    peerStore = PeerCtxStore.new()

@ -571,138 +583,72 @@ asyncchecksuite "Task Handler":
    peersCtx = @[]

    for i in 0 .. 3:
      let seckey = PrivateKey.random(rng[]).tryGet()
      peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet())

      peers.add(PeerId.example)
      peersCtx.add(BlockExcPeerCtx(id: peers[i]))
      peerStore.add(peersCtx[i])

    engine.pricing = Pricing.example.some

  test "Should send want-blocks in priority order":
  # FIXME: this is disabled for now: I've dropped block priorities to make
  # my life easier as I try to optimize the protocol, and also because
  # they were not being used anywhere.
  #
  # test "Should send want-blocks in priority order":
  #   proc sendBlocksDelivery(
  #     id: PeerId, blocksDelivery: seq[BlockDelivery]
  #   ) {.async: (raises: [CancelledError]).} =
  #     check blocksDelivery.len == 2
  #     check:
  #       blocksDelivery[1].address == blocks[0].address
  #       blocksDelivery[0].address == blocks[1].address

  #   for blk in blocks:
  #     (await engine.localStore.putBlock(blk)).tryGet()
  #   engine.network.request.sendBlocksDelivery = sendBlocksDelivery

  #   # second block to send by priority
  #   peersCtx[0].peerWants.add(
  #     WantListEntry(
  #       address: blocks[0].address,
  #       priority: 49,
  #       cancel: false,
  #       wantType: WantType.WantBlock,
  #       sendDontHave: false,
  #     )
  #   )

  #   # first block to send by priority
  #   peersCtx[0].peerWants.add(
  #     WantListEntry(
  #       address: blocks[1].address,
  #       priority: 50,
  #       cancel: false,
  #       wantType: WantType.WantBlock,
  #       sendDontHave: false,
  #     )
  #   )

  #   await engine.taskHandler(peersCtx[0])

  test "Should mark outgoing blocks as sent":
    proc sendBlocksDelivery(
        id: PeerId, blocksDelivery: seq[BlockDelivery]
    ) {.async: (raises: [CancelledError]).} =
      check blocksDelivery.len == 2
      check:
        blocksDelivery[1].address == blocks[0].address
        blocksDelivery[0].address == blocks[1].address
      let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
      check peersCtx[0].isBlockSent(blockAddress)

    for blk in blocks:
      (await engine.localStore.putBlock(blk)).tryGet()
    engine.network.request.sendBlocksDelivery = sendBlocksDelivery

    # second block to send by priority
    peersCtx[0].peerWants.add(
      WantListEntry(
        address: blocks[0].address,
        priority: 49,
        cancel: false,
        wantType: WantType.WantBlock,
        sendDontHave: false,
      )
    )

    # first block to send by priority
    peersCtx[0].peerWants.add(
      WantListEntry(
        address: blocks[1].address,
        priority: 50,
        cancel: false,
        wantType: WantType.WantBlock,
        sendDontHave: false,
      )
    )
    peersCtx[0].wantedBlocks.incl(blocks[0].address)

    await engine.taskHandler(peersCtx[0])

  test "Should set in-flight for outgoing blocks":
    proc sendBlocksDelivery(
        id: PeerId, blocksDelivery: seq[BlockDelivery]
    ) {.async: (raises: [CancelledError]).} =
      check peersCtx[0].peerWants[0].inFlight

    for blk in blocks:
      (await engine.localStore.putBlock(blk)).tryGet()
    engine.network.request.sendBlocksDelivery = sendBlocksDelivery

    peersCtx[0].peerWants.add(
      WantListEntry(
        address: blocks[0].address,
        priority: 50,
        cancel: false,
        wantType: WantType.WantBlock,
        sendDontHave: false,
        inFlight: false,
      )
    )
    await engine.taskHandler(peersCtx[0])

  test "Should clear in-flight when local lookup fails":
    peersCtx[0].peerWants.add(
      WantListEntry(
        address: blocks[0].address,
        priority: 50,
        cancel: false,
        wantType: WantType.WantBlock,
        sendDontHave: false,
        inFlight: false,
      )
    )
    await engine.taskHandler(peersCtx[0])

    check not peersCtx[0].peerWants[0].inFlight

  test "Should send presence":
    let present = blocks
    let missing = @[Block.new("missing".toBytes).tryGet()]
    let price = (!engine.pricing).price

    proc sendPresence(
        id: PeerId, presence: seq[BlockPresence]
    ) {.async: (raises: [CancelledError]).} =
      check presence.mapIt(!Presence.init(it)) ==
        @[
          Presence(address: present[0].address, have: true, price: price),
          Presence(address: present[1].address, have: true, price: price),
          Presence(address: missing[0].address, have: false),
        ]

    for blk in blocks:
      (await engine.localStore.putBlock(blk)).tryGet()
    engine.network.request.sendPresence = sendPresence

    # have block
    peersCtx[0].peerWants.add(
      WantListEntry(
        address: present[0].address,
        priority: 1,
        cancel: false,
        wantType: WantType.WantHave,
        sendDontHave: false,
      )
    )

    # have block
    peersCtx[0].peerWants.add(
      WantListEntry(
        address: present[1].address,
        priority: 1,
        cancel: false,
        wantType: WantType.WantHave,
        sendDontHave: false,
      )
    )

    # don't have block
    peersCtx[0].peerWants.add(
      WantListEntry(
        address: missing[0].address,
        priority: 1,
        cancel: false,
        wantType: WantType.WantHave,
        sendDontHave: false,
      )
    )
  test "Should not mark blocks for which local look fails as sent":
    peersCtx[0].wantedBlocks.incl(blocks[0].address)

    await engine.taskHandler(peersCtx[0])

    let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
    check not peersCtx[0].isBlockSent(blockAddress)
@ -40,7 +40,7 @@ asyncchecksuite "Network - Handlers":
    done = newFuture[void]()
    buffer = BufferStream.new()
    network = BlockExcNetwork.new(switch = newStandardSwitch(), connProvider = getConn)
    network.setupPeer(peerId)
    await network.handlePeerJoined(peerId)
    networkPeer = network.peers[peerId]
    discard await networkPeer.connect()

@ -81,8 +81,9 @@ suite "Peer Context Store Peer Selection":
        )
      )

    peerCtxs[0].peerWants = entries
    peerCtxs[5].peerWants = entries
    for address in addresses:
      peerCtxs[0].wantedBlocks.incl(address)
      peerCtxs[5].wantedBlocks.incl(address)

    let peers = store.peersWant(addresses[4])

@ -38,8 +38,7 @@ proc example*(_: type Pricing): Pricing =
  Pricing(address: EthAddress.example, price: uint32.rand.u256)

proc example*(_: type bt.Block, size: int = 4096): bt.Block =
  let length = rand(size)
  let bytes = newSeqWith(length, rand(uint8))
  let bytes = newSeqWith(size, rand(uint8))
  bt.Block.new(bytes).tryGet()

proc example*(_: type PeerId): PeerId =

@ -12,13 +12,16 @@ import pkg/codex/rng
import pkg/codex/utils

import ./helpers/nodeutils
import ./helpers/datasetutils
import ./helpers/randomchunker
import ./helpers/mockchunker
import ./helpers/mockdiscovery
import ./helpers/always
import ../checktest

export randomchunker, nodeutils, mockdiscovery, mockchunker, always, checktest, manifest
export
  randomchunker, nodeutils, datasetutils, mockdiscovery, mockchunker, always, checktest,
  manifest

export libp2p except setup, eventually

@ -46,23 +49,6 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =

  return buf

proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, CodexTree) =
  if blocks.len == 0:
    return failure("Blocks list was empty")

  let
    datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
    blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
    tree = ?CodexTree.init(blocks.mapIt(it.cid))
    treeCid = ?tree.rootCid
    manifest = Manifest.new(
      treeCid = treeCid,
      blockSize = NBytes(blockSize),
      datasetSize = NBytes(datasetSize),
    )

  return success((manifest, tree))

proc makeWantList*(
    cids: seq[Cid],
    priority: int = 0,

@ -91,7 +77,7 @@ proc storeDataGetManifest*(
    (await store.putBlock(blk)).tryGet()

  let
    (manifest, tree) = makeManifestAndTree(blocks).tryGet()
    (_, tree, manifest) = makeDataset(blocks).tryGet()
    treeCid = tree.rootCid.tryGet()

  for i in 0 ..< tree.leavesCount:

@ -110,19 +96,6 @@

  return await storeDataGetManifest(store, blocks)

proc makeRandomBlocks*(
    datasetSize: int, blockSize: NBytes
): Future[seq[Block]] {.async.} =
  var chunker =
    RandomChunker.new(Rng.instance(), size = datasetSize, chunkSize = blockSize)

  while true:
    let chunk = await chunker.getBytes()
    if chunk.len <= 0:
      break

    result.add(Block.new(chunk).tryGet())

proc corruptBlocks*(
    store: BlockStore, manifest: Manifest, blks, bytes: int
): Future[seq[int]] {.async.} =

@ -147,4 +120,5 @@ proc corruptBlocks*(

    bytePos.add(ii)
    blk.data[ii] = byte 0

  return pos
45
tests/codex/helpers/datasetutils.nim
Normal file
45
tests/codex/helpers/datasetutils.nim
Normal file
@ -0,0 +1,45 @@
import std/random

import pkg/chronos
import pkg/codex/blocktype as bt
import pkg/codex/merkletree
import pkg/codex/manifest
import pkg/codex/rng

import ./randomchunker

type TestDataset* = tuple[blocks: seq[Block], tree: CodexTree, manifest: Manifest]

proc makeRandomBlock*(size: NBytes): Block =
  let bytes = newSeqWith(size.int, rand(uint8))
  Block.new(bytes).tryGet()

proc makeRandomBlocks*(
    datasetSize: int, blockSize: NBytes
): Future[seq[Block]] {.async.} =
  var chunker =
    RandomChunker.new(Rng.instance(), size = datasetSize, chunkSize = blockSize)

  while true:
    let chunk = await chunker.getBytes()
    if chunk.len <= 0:
      break

    result.add(Block.new(chunk).tryGet())

proc makeDataset*(blocks: seq[Block]): ?!TestDataset =
  if blocks.len == 0:
    return failure("Blocks list was empty")

  let
    datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
    blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
    tree = ?CodexTree.init(blocks.mapIt(it.cid))
    treeCid = ?tree.rootCid
    manifest = Manifest.new(
      treeCid = treeCid,
      blockSize = NBytes(blockSize),
      datasetSize = NBytes(datasetSize),
    )

  return success((blocks, tree, manifest))
@ -21,7 +21,7 @@ proc new*(
  var consumed = 0
  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    if consumed >= dataset.len:
      return 0