Mirror of https://github.com/logos-storage/logos-storage-nim.git, synced 2026-01-19 05:43:12 +00:00

Commit d8f3739385: merging master
.github/workflows/ci.yml (vendored): 32 changes
@@ -7,28 +7,43 @@ on:
   workflow_dispatch:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: v1.6.10
+  nim_version: v1.6.14

 jobs:
   build:
     strategy:
       matrix:
         os: [linux, macos, windows]
         include:
           - os: linux
             cpu: amd64
             builder: ubuntu-latest
             shell: bash --noprofile --norc -e -o pipefail
+            tests: all
           - os: macos
             cpu: amd64
             builder: macos-latest
             shell: bash --noprofile --norc -e -o pipefail
+            tests: all
           - os: windows
             cpu: amd64
             builder: windows-latest
             shell: msys2
+            tests: unittest
+          - os: windows
+            cpu: amd64
+            builder: windows-latest
+            shell: msys2
+            tests: contract
+          - os: windows
+            cpu: amd64
+            builder: windows-latest
+            shell: msys2
+            tests: integration

     defaults:
       run:
         shell: ${{ matrix.shell }} {0}

-    name: '${{ matrix.os }}'
+    name: '${{ matrix.os }}-${{ matrix.cpu }}-tests-${{ matrix.tests }}'
     runs-on: ${{ matrix.builder }}
     timeout-minutes: 80
     steps:

@@ -44,7 +59,9 @@ jobs:
           shell: ${{ matrix.shell }}
           nim_version: ${{ env.nim_version }}

+      ## Part 1 Tests ##
       - name: Unit tests
+        if: matrix.tests == 'unittest' || matrix.tests == 'all'
         run: make -j${ncpu} test

       # workaround for https://github.com/NomicFoundation/hardhat/issues/3877

@@ -54,6 +71,7 @@ jobs:
           node-version: 18.15

       - name: Start Ethereum node with Codex contracts
+        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'all'
         working-directory: vendor/codex-contracts-eth
         env:
           MSYS2_PATH_TYPE: inherit

@@ -61,10 +79,14 @@
           npm install
           npm start &

+      ## Part 2 Tests ##
       - name: Contract tests
+        if: matrix.tests == 'contract' || matrix.tests == 'all'
         run: make -j${ncpu} testContracts

+      ## Part 3 Tests ##
       - name: Integration tests
+        if: matrix.tests == 'integration' || matrix.tests == 'all'
         run: make -j${ncpu} testIntegration

   coverage:

@@ -83,7 +105,9 @@
           nim_version: ${{ env.nim_version }}

       - name: Generate coverage data
-        run: make -j${ncpu} coverage
+        run: |
+          # make -j${ncpu} coverage
+          make -j${ncpu} coverage-script
+        shell: bash

       - name: Upload coverage data to Codecov
.gitignore (vendored): 3 changes
@@ -15,6 +15,8 @@ coverage/

 # Nimble packages
 /vendor/.nimble
+/vendor/packages/
+# /vendor/*/

 # Nimble user files
 nimble.develop

@@ -36,3 +38,4 @@ nimbus-build-system.paths
 docker/hostdatadir
 docker/prometheus-data
 .DS_Store
+nim.cfg
.gitmodules (vendored): 12 changes
@@ -181,3 +181,15 @@
 [submodule "vendor/codex-contracts-eth"]
 	path = vendor/codex-contracts-eth
 	url = https://github.com/status-im/codex-contracts-eth
+[submodule "vendor/nim-protobuf-serialization"]
+	path = vendor/nim-protobuf-serialization
+	url = https://github.com/status-im/nim-protobuf-serialization
+[submodule "vendor/nim-results"]
+	path = vendor/nim-results
+	url = https://github.com/arnetheduck/nim-results
+[submodule "vendor/nim-testutils"]
+	path = vendor/nim-testutils
+	url = https://github.com/status-im/nim-testutils
+[submodule "vendor/npeg"]
+	path = vendor/npeg
+	url = https://github.com/zevv/npeg
Makefile: 26 changes
@@ -48,7 +48,7 @@ else # "variables.mk" was included. Business as usual until the end of this file
 # Builds the codex binary
 all: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim codex $(NIM_PARAMS) codex.nims
+		$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims

 # must be included after the default target
 -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk

@@ -60,15 +60,12 @@ else
 NIM_PARAMS := $(NIM_PARAMS) -d:release
 endif

-deps: | deps-common nat-libs codex.nims
+deps: | deps-common nat-libs
 ifneq ($(USE_LIBBACKTRACE), 0)
 deps: | libbacktrace
 endif

-#- deletes and recreates "codex.nims" which on Windows is a copy instead of a proper symlink
 update: | update-common
-	rm -rf codex.nims && \
-		$(MAKE) codex.nims $(HANDLE_OUTPUT)

 # detecting the os
 ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10...

@@ -83,26 +80,22 @@ endif
 # Builds and run a part of the test suite
 test: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim test $(NIM_PARAMS) codex.nims
+		$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims

 # Builds and runs the smart contract tests
 testContracts: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) codex.nims
+		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims

 # Builds and runs the integration tests
 testIntegration: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) codex.nims
+		$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims

 # Builds and runs all tests
 testAll: | build deps
 	echo -e $(BUILD_MSG) "build/$@" && \
-	$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) codex.nims
-
-# symlink
-codex.nims:
-	ln -s codex.nimble $@
+	$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims

 # nim-libbacktrace
 LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0

@@ -127,8 +120,15 @@ coverage:
 	shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
 	echo -e $(BUILD_MSG) "coverage/report/index.html"
 	genhtml coverage/coverage.f.info --output-directory coverage/report

 show-coverage:
 	if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi

+coverage-script: build deps
+	echo -e $(BUILD_MSG) "build/$@" && \
+		$(ENV_SCRIPT) nim coverage $(NIM_PARAMS) build.nims
+	echo "Run `make show-coverage` to view coverage results"
+
 # usual cleaning
 clean: | clean-common
 	rm -rf build
atlas.lock (new file): 209 lines
@@ -0,0 +1,209 @@
{
  "clangVersion": "",
  "gccVersion": "",
  "hostCPU": "arm64",
  "hostOS": "macosx",
  "items": {
    "asynctest": {
      "commit": "fe1a34caf572b05f8bdba3b650f1871af9fce31e",
      "dir": "vendor/asynctest",
      "url": "https://github.com/codex-storage/asynctest"
    },
    "dnsclient.nim": {
      "commit": "23214235d4784d24aceed99bbfe153379ea557c8",
      "dir": "vendor/dnsclient.nim",
      "url": "https://github.com/ba0f3/dnsclient.nim"
    },
    "lrucache.nim": {
      "commit": "8767ade0b76ea5b5d4ce24a52d0c58a6ebeb66cd",
      "dir": "vendor/lrucache.nim",
      "url": "https://github.com/status-im/lrucache.nim"
    },
    "nim-bearssl": {
      "commit": "99fcb3405c55b27cfffbf60f5368c55da7346f23",
      "dir": "vendor/nim-bearssl",
      "url": "https://github.com/status-im/nim-bearssl"
    },
    "nim-blscurve": {
      "commit": "48d8668c5a9a350d3a7ee0c3713ef9a11980a40d",
      "dir": "vendor/nim-blscurve",
      "url": "https://github.com/status-im/nim-blscurve"
    },
    "nim-chronicles": {
      "commit": "c9c8e58ec3f89b655a046c485f622f9021c68b61",
      "dir": "vendor/nim-chronicles",
      "url": "https://github.com/status-im/nim-chronicles"
    },
    "nim-chronos": {
      "commit": "0277b65be2c7a365ac13df002fba6e172be55537",
      "dir": "vendor/nim-chronos",
      "url": "https://github.com/status-im/nim-chronos"
    },
    "nim-confutils": {
      "commit": "2028b41602b3abf7c9bf450744efde7b296707a2",
      "dir": "vendor/nim-confutils",
      "url": "https://github.com/status-im/nim-confutils"
    },
    "nim-contract-abi": {
      "commit": "61f8f59b3917d8e27c6eb4330a6d8cf428e98b2d",
      "dir": "vendor/nim-contract-abi",
      "url": "https://github.com/status-im/nim-contract-abi"
    },
    "nim-datastore": {
      "commit": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa",
      "dir": "vendor/nim-datastore",
      "url": "https://github.com/codex-storage/nim-datastore"
    },
    "nim-faststreams": {
      "commit": "720fc5e5c8e428d9d0af618e1e27c44b42350309",
      "dir": "vendor/nim-faststreams",
      "url": "https://github.com/status-im/nim-faststreams"
    },
    "nim-http-utils": {
      "commit": "3b491a40c60aad9e8d3407443f46f62511e63b18",
      "dir": "vendor/nim-http-utils",
      "url": "https://github.com/status-im/nim-http-utils"
    },
    "nim-json-rpc": {
      "commit": "0bf2bcbe74a18a3c7a709d57108bb7b51e748a92",
      "dir": "vendor/nim-json-rpc",
      "url": "https://github.com/status-im/nim-json-rpc"
    },
    "nim-json-serialization": {
      "commit": "bb53d49caf2a6c6cf1df365ba84af93cdcfa7aa3",
      "dir": "vendor/nim-json-serialization",
      "url": "https://github.com/status-im/nim-json-serialization"
    },
    "nim-leopard": {
      "commit": "1a6f2ab7252426a6ac01482a68b75d0c3b134cf0",
      "dir": "vendor/nim-leopard",
      "url": "https://github.com/status-im/nim-leopard"
    },
    "nim-libbacktrace": {
      "commit": "b29c22ba0ef13de50b779c776830dbea1d50cd33",
      "dir": "vendor/nim-libbacktrace",
      "url": "https://github.com/status-im/nim-libbacktrace"
    },
    "nim-libp2p": {
      "commit": "440461b24b9e66542b34d26a0b908c17f6549d05",
      "dir": "vendor/nim-libp2p",
      "url": "https://github.com/status-im/nim-libp2p"
    },
    "nim-libp2p-dht": {
      "commit": "fdd02450aa6979add7dabd29a3ba0f8738bf89f8",
      "dir": "vendor/nim-libp2p-dht",
      "url": "https://github.com/status-im/nim-libp2p-dht"
    },
    "nim-metrics": {
      "commit": "6142e433fc8ea9b73379770a788017ac528d46ff",
      "dir": "vendor/nim-metrics",
      "url": "https://github.com/status-im/nim-metrics"
    },
    "nim-nat-traversal": {
      "commit": "27d314d65c9078924b3239fe4e2f5af0c512b28c",
      "dir": "vendor/nim-nat-traversal",
      "url": "https://github.com/status-im/nim-nat-traversal"
    },
    "nim-nitro": {
      "commit": "6b4c455bf4dad7449c1580055733a1738fcd5aec",
      "dir": "vendor/nim-nitro",
      "url": "https://github.com/status-im/nim-nitro"
    },
    "nim-presto": {
      "commit": "3984431dc0fc829eb668e12e57e90542b041d298",
      "dir": "vendor/nim-presto",
      "url": "https://github.com/status-im/nim-presto"
    },
    "nim-protobuf-serialization": {
      "commit": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6",
      "dir": "vendor/nim-protobuf-serialization",
      "url": "https://github.com/status-im/nim-protobuf-serialization"
    },
    "nim-results": {
      "commit": "f3c666a272c69d70cb41e7245e7f6844797303ad",
      "dir": "vendor/nim-results",
      "url": "https://github.com/arnetheduck/nim-results"
    },
    "nim-secp256k1": {
      "commit": "2acbbdcc0e63002a013fff49f015708522875832",
      "dir": "vendor/nim-secp256k1",
      "url": "https://github.com/status-im/nim-secp256k1"
    },
    "nim-serialization": {
      "commit": "384eb2561ee755446cff512a8e057325848b86a7",
      "dir": "vendor/nim-serialization",
      "url": "https://github.com/status-im/nim-serialization"
    },
    "nim-sqlite3-abi": {
      "commit": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3",
      "dir": "vendor/nim-sqlite3-abi",
      "url": "https://github.com/arnetheduck/nim-sqlite3-abi"
    },
    "nim-stew": {
      "commit": "7afe7e3c070758cac1f628e4330109f3ef6fc853",
      "dir": "vendor/nim-stew",
      "url": "https://github.com/status-im/nim-stew"
    },
    "nim-taskpools": {
      "commit": "b3673c7a7a959ccacb393bd9b47e997bbd177f5a",
      "dir": "vendor/nim-taskpools",
      "url": "https://github.com/status-im/nim-taskpools"
    },
    "nim-testutils": {
      "commit": "b56a5953e37fc5117bd6ea6dfa18418c5e112815",
      "dir": "vendor/nim-testutils",
      "url": "https://github.com/status-im/nim-testutils"
    },
    "nim-toml-serialization": {
      "commit": "86d477136f105f04bfd0dd7c0e939593d81fc581",
      "dir": "vendor/nim-toml-serialization",
      "url": "https://github.com/status-im/nim-toml-serialization"
    },
    "nim-unittest2": {
      "commit": "b178f47527074964f76c395ad0dfc81cf118f379",
      "dir": "vendor/nim-unittest2",
      "url": "https://github.com/status-im/nim-unittest2"
    },
    "nim-websock": {
      "commit": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8",
      "dir": "vendor/nim-websock",
      "url": "https://github.com/status-im/nim-websock"
    },
    "nim-zlib": {
      "commit": "f34ca261efd90f118dc1647beefd2f7a69b05d93",
      "dir": "vendor/nim-zlib",
      "url": "https://github.com/status-im/nim-zlib"
    },
    "nim-stint": {
      "dir": "vendor/stint",
      "url": "https://github.com/status-im/nim-stint",
      "commit": "86621eced1dcfb5e25903019ebcfc76ed9128ec5"
    },
    "nimcrypto": {
      "commit": "24e006df85927f64916e60511620583b11403178",
      "dir": "vendor/nimcrypto",
      "url": "https://github.com/status-im/nimcrypto"
    },
    "npeg": {
      "commit": "b15a10e388b91b898c581dbbcb6a718d46b27d2f",
      "dir": "vendor/npeg",
      "url": "https://github.com/zevv/npeg"
    },
    "questionable": {
      "commit": "b3cf35ac450fd42c9ea83dc084f5cba2efc55da3",
      "dir": "vendor/questionable",
      "url": "https://github.com/codex-storage/questionable"
    },
    "upraises": {
      "commit": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2",
      "dir": "vendor/upraises",
      "url": "https://github.com/markspanbroek/upraises"
    }
  },
  "nimVersion": "1.6.14",
  "nimbleFile": {
    "content": "# Package\n\nversion = \"0.3.2\"\nauthor = \"Status Research & Development GmbH\"\ndescription = \"DHT based on the libp2p Kademlia spec\"\nlicense = \"MIT\"\nskipDirs = @[\"tests\"]\n\n\n# Dependencies\nrequires \"nim >= 1.2.0\"\nrequires \"secp256k1#2acbbdcc0e63002a013fff49f015708522875832\" # >= 0.5.2 & < 0.6.0\nrequires \"protobuf_serialization\" # >= 0.2.0 & < 0.3.0\nrequires \"nimcrypto == 0.5.4\"\nrequires \"bearssl#head\"\nrequires \"chronicles >= 0.10.2 & < 0.11.0\"\nrequires \"chronos == 3.2.0\" # >= 3.0.11 & < 3.1.0\nrequires \"libp2p#unstable\"\nrequires \"metrics\"\nrequires \"stew#head\"\nrequires \"stint\"\nrequires \"asynctest >= 0.3.1 & < 0.4.0\"\nrequires \"https://github.com/codex-storage/nim-datastore#head\"\nrequires \"questionable\"\n\ninclude \"build.nims\"\n\n",
    "filename": ""
  },
  "nimcfg": "############# begin Atlas config section ##########\n--noNimblePath\n--path:\"vendor/nim-secp256k1\"\n--path:\"vendor/nim-protobuf-serialization\"\n--path:\"vendor/nimcrypto\"\n--path:\"vendor/nim-bearssl\"\n--path:\"vendor/nim-chronicles\"\n--path:\"vendor/nim-chronos\"\n--path:\"vendor/nim-libp2p\"\n--path:\"vendor/nim-metrics\"\n--path:\"vendor/nim-stew\"\n--path:\"vendor/nim-stint\"\n--path:\"vendor/asynctest\"\n--path:\"vendor/nim-datastore\"\n--path:\"vendor/questionable\"\n--path:\"vendor/nim-faststreams\"\n--path:\"vendor/nim-serialization\"\n--path:\"vendor/npeg/src\"\n--path:\"vendor/nim-unittest2\"\n--path:\"vendor/nim-testutils\"\n--path:\"vendor/nim-json-serialization\"\n--path:\"vendor/nim-http-utils\"\n--path:\"vendor/dnsclient.nim/src\"\n--path:\"vendor/nim-websock\"\n--path:\"vendor/nim-results\"\n--path:\"vendor/nim-sqlite3-abi\"\n--path:\"vendor/upraises\"\n--path:\"vendor/nim-zlib\"\n############# end Atlas config section ##########\n"
}
build.nims (new file): 87 lines
@@ -0,0 +1,87 @@
mode = ScriptMode.Verbose


### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
  if not dirExists "build":
    mkDir "build"
  # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
  var extra_params = params
  when compiles(commandLineParams):
    for param in commandLineParams():
      extra_params &= " " & param
  else:
    for i in 2..<paramCount():
      extra_params &= " " & paramStr(i)

  let cmd = "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
  exec(cmd)

proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
  buildBinary name, srcDir, params
  exec "build/" & name

task codex, "build codex binary":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"

task testCodex, "Build & run Codex tests":
  test "testCodex", params = "-d:codex_enable_proof_failures=true"

task testContracts, "Build & run Codex Contract tests":
  test "testContracts"

task testIntegration, "Run integration tests":
  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
  test "testIntegration"

task build, "build codex binary":
  codexTask()

task test, "Run tests":
  testCodexTask()

task testAll, "Run all tests":
  testCodexTask()
  testContractsTask()
  testIntegrationTask()

import strutils
import os

task coverage, "generates code coverage report":
  var (output, exitCode) = gorgeEx("which lcov")
  if exitCode != 0:
    echo " ************************** ⛔️ ERROR ⛔️ **************************"
    echo " **   ERROR: lcov not found, it must be installed to run code   **"
    echo " **   coverage locally                                          **"
    echo " *****************************************************************"
    quit 1

  (output, exitCode) = gorgeEx("gcov --version")
  if output.contains("Apple LLVM"):
    echo " ************************* ⚠️ WARNING ⚠️ *************************"
    echo " **  WARNING: Using Apple's llvm-cov in place of gcov, which   **"
    echo " **  emulates an old version of gcov (4.2.0) and therefore     **"
    echo " **  coverage results will differ than those on CI (which      **"
    echo " **  uses a much newer version of gcov).                       **"
    echo " *****************************************************************"

  var nimSrcs = " "
  for f in walkDirRec("codex", {pcFile}):
    if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()

  echo "======== Running Tests ======== "
  test "coverage", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release "
  exec("rm nimcache/coverage/*.c")
  rmDir("coverage"); mkDir("coverage")
  echo " ======== Running LCOV ======== "
  exec("lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info")
  exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
  echo " ======== Generating HTML coverage report ======== "
  exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
  echo " ======== Coverage report Done ======== "

task showCoverage, "open coverage html":
  echo " ======== Opening HTML coverage report in browser... ======== "
  if findExe("open") != "":
    exec("open coverage/report/index.html")
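build.nims now hosts the NimScript helpers that used to live inline in codex.nimble (compare the next file). For orientation, a minimal sketch of how a new task would be composed from these helpers; the `testBlockexc` task and its `tests/testBlockexc.nim` source file are hypothetical and assume the snippet is appended to build.nims:

```nim
# Hypothetical task built on build.nims' helpers: `test` calls
# `buildBinary` (which shells out to `nim c --out:build/<name> ...`)
# and then executes the resulting binary from build/.
task testBlockexc, "Build & run only the block exchange tests":
  test "testBlockexc", params = "-d:chronicles_log_level=INFO"
```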
codex.nimble: 53 changes
@@ -1,11 +1,10 @@
-mode = ScriptMode.Verbose
-
 version = "0.1.0"
 author = "Codex Team"
 description = "p2p data durability engine"
 license = "MIT"
 binDir = "build"
 srcDir = "."
+installFiles = @["build.nims"]

 requires "nim >= 1.2.0"
 requires "asynctest >= 0.3.2 & < 0.4.0"

@@ -13,7 +12,7 @@ requires "bearssl >= 0.1.4"
 requires "chronicles >= 0.7.2"
 requires "chronos >= 2.5.2"
 requires "confutils"
-requires "ethers >= 0.2.4 & < 0.3.0"
+requires "ethers >= 0.5.0 & < 0.6.0"
 requires "libbacktrace"
 requires "libp2p"
 requires "metrics"

@@ -32,50 +31,4 @@ requires "blscurve"
 requires "libp2pdht"
 requires "eth"

-when declared(namedBin):
-  namedBin = {
-    "codex/codex": "codex"
-  }.toTable()
-
-when not declared(getPathsClause):
-  proc getPathsClause(): string = ""
-
-### Helper functions
-proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
-  if not dirExists "build":
-    mkDir "build"
-  # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
-  var extra_params = params
-  when compiles(commandLineParams):
-    for param in commandLineParams:
-      extra_params &= " " & param
-  else:
-    for i in 2..<paramCount():
-      extra_params &= " " & paramStr(i)
-
-  exec "nim " & getPathsClause() & " " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
-
-proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
-  buildBinary name, srcDir, params
-  exec "build/" & name
-
-task codex, "build codex binary":
-  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
-
-task testCodex, "Build & run Codex tests":
-  test "testCodex", params = "-d:codex_enable_proof_failures=true"
-
-task testContracts, "Build & run Codex Contract tests":
-  test "testContracts"
-
-task testIntegration, "Run integration tests":
-  buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
-  test "testIntegration"
-
-task test, "Run tests":
-  testCodexTask()
-
-task testAll, "Run all tests":
-  testCodexTask()
-  testContractsTask()
-  testIntegrationTask()
+include "build.nims"
@@ -11,7 +11,7 @@ import std/sequtils

 import pkg/chronos
 import pkg/chronicles
-import pkg/libp2p
+import pkg/libp2p/cid
 import pkg/metrics
 import pkg/questionable
 import pkg/questionable/results
@@ -14,7 +14,8 @@ import std/algorithm

 import pkg/chronos
 import pkg/chronicles
-import pkg/libp2p
+import pkg/libp2p/[cid, switch]
 import pkg/metrics
+import pkg/stint

 import ../../stores/blockstore

@@ -36,6 +37,13 @@ export peers, pendingblocks, payments, discovery
 logScope:
   topics = "codex blockexcengine"

+declareCounter(codexBlockExchangeWantHaveListsSent, "codex blockexchange wantHave lists sent")
+declareCounter(codexBlockExchangeWantHaveListsReceived, "codex blockexchange wantHave lists received")
+declareCounter(codexBlockExchangeWantBlockListsSent, "codex blockexchange wantBlock lists sent")
+declareCounter(codexBlockExchangeWantBlockListsReceived, "codex blockexchange wantBlock lists received")
+declareCounter(codexBlockExchangeBlocksSent, "codex blockexchange blocks sent")
+declareCounter(codexBlockExchangeBlocksReceived, "codex blockexchange blocks received")
+
 const
   DefaultMaxPeersPerRequest* = 10
   DefaultTaskQueueSize = 100

@@ -190,6 +198,8 @@ proc requestBlock*(
   await b.sendWantBlock(cid, blockPeer)

+  codexBlockExchangeWantBlockListsSent.inc()
+
   if (peers.len - 1) == 0:
     trace "No peers to send want list to", cid
     b.discovery.queueFindBlocksReq(@[cid])

@@ -197,6 +207,8 @@ proc requestBlock*(
   await b.sendWantHave(cid, blockPeer, toSeq(b.peers))

+  codexBlockExchangeWantHaveListsSent.inc()
+
   return await blk

 proc blockPresenceHandler*(

@@ -297,6 +309,8 @@ proc blocksHandler*(
       trace "Unable to store block", cid = blk.cid

   await b.resolveBlocks(blocks)
+  codexBlockExchangeBlocksReceived.inc(blocks.len.int64)
+
   let
     peerCtx = b.peers.get(peer)

@@ -336,6 +350,9 @@ proc wantListHandler*(
             b.pricing.get(Pricing(price: 0.u256))
             .price.toBytesBE)

+      if e.wantType == WantType.WantHave:
+        codexBlockExchangeWantHaveListsReceived.inc()
+
       if not have and e.sendDontHave:
         trace "Adding dont have entry to presence response", cid = e.cid
         presence.add(

@@ -353,6 +370,7 @@ proc wantListHandler*(
     elif e.wantType == WantType.WantBlock:
       trace "Added entry to peer's want blocks list", cid = e.cid
       peerCtx.peerWants.add(e)
+      codexBlockExchangeWantBlockListsReceived.inc()
     else:
       # peer doesn't want this block anymore
       if e.cancel:

@@ -467,6 +485,9 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
       task.id,
       blocks)

+    codexBlockExchangeBlocksSent.inc(blocks.len.int64)
+
+    trace "About to remove entries from peerWants", blocks = blocks.len, items = task.peerWants.len
     # Remove successfully sent blocks
     task.peerWants.keepIf(
       proc(e: Entry): bool =

@@ -500,7 +521,7 @@ proc new*(
     peersPerRequest = DefaultMaxPeersPerRequest
 ): BlockExcEngine =
   ## Create new block exchange engine instance
   ##

   let
     engine = BlockExcEngine(
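The instrumentation added above follows the standard nim-metrics pattern: counters are declared once at module scope and bumped at the relevant call sites. A self-contained sketch of that pattern, with hypothetical names:

```nim
import pkg/metrics

# declared once at module scope, like the codexBlockExchange* counters above
declareCounter(exampleBlocksSent, "example blocks sent")

proc sendBlocks(count: int) =
  # ... send `count` blocks here ...
  exampleBlocksSent.inc(count.int64)

sendBlocks(3)  # exampleBlocksSent now reads 3
```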
@@ -16,12 +16,15 @@ push: {.upraises: [].}
 import pkg/chronicles
 import pkg/chronos
 import pkg/libp2p
+import pkg/metrics

 import ../../blocktype

 logScope:
   topics = "codex pendingblocks"

+declareGauge(codexBlockExchangePendingBlockRequests, "codex blockexchange pending block requests")
+
 const
   DefaultBlockTimeout* = 10.minutes

@@ -33,6 +36,9 @@ type
   PendingBlocksManager* = ref object of RootObj
     blocks*: Table[Cid, BlockReq] # pending Block requests

+proc updatePendingBlockGauge(p: PendingBlocksManager) =
+  codexBlockExchangePendingBlockRequests.set(p.blocks.len.int64)
+
 proc getWantHandle*(
   p: PendingBlocksManager,
   cid: Cid,

@@ -50,6 +56,7 @@ proc getWantHandle*(
     trace "Adding pending future for block", cid, inFlight = p.blocks[cid].inFlight

+    p.updatePendingBlockGauge()
     return await p.blocks[cid].handle.wait(timeout)
   except CancelledError as exc:
     trace "Blocks cancelled", exc = exc.msg, cid

@@ -60,6 +67,7 @@ proc getWantHandle*(
     raise exc
   finally:
     p.blocks.del(cid)
+    p.updatePendingBlockGauge()

 proc resolve*(p: PendingBlocksManager,
               blocks: seq[Block]) =
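Unlike the counters in the engine, pendingblocks uses a gauge, because the tracked quantity (the size of the `blocks` table) moves both ways; every mutation is followed by a call to `updatePendingBlockGauge`. A standalone sketch of the same pattern, with hypothetical names:

```nim
import std/tables
import pkg/metrics

declareGauge(exampleTrackedItems, "number of tracked items")

var items = initTable[string, int]()

proc updateGauge() =
  exampleTrackedItems.set(items.len.int64)

proc put(key: string, value: int) =
  items[key] = value
  updateGauge()      # gauge follows the table size up...

proc drop(key: string) =
  items.del(key)
  updateGauge()      # ...and back down
```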
@@ -285,7 +285,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
   if peer in b.peers:
     return b.peers.getOrDefault(peer, nil)

-  var getConn = proc(): Future[Connection] {.async.} =
+  var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
     try:
       return await b.switch.dial(peer, Codec)
     except CatchableError as exc:
@@ -14,7 +14,7 @@ import pkg/upraises

 push: {.upraises: [].}

-import pkg/libp2p
+import pkg/libp2p/[cid, multicodec]
 import pkg/stew/byteutils
 import pkg/questionable
 import pkg/questionable/results
@@ -106,6 +106,7 @@ type
       defaultValue: noCommand }: StartUpCommand

     of noCommand:
+
       listenAddrs* {.
         desc: "Multi Addresses to listen on"
         defaultValue: @[

@@ -292,9 +293,17 @@ proc defaultDataDir*(): string =
   getHomeDir() / dataDir

-proc parseCmdArg*(T: type MultiAddress, input: string): T
+proc parseCmdArg*(T: typedesc[MultiAddress],
+                  input: string): MultiAddress
     {.upraises: [ValueError, LPError].} =
-  MultiAddress.init($input).tryGet()
+  var ma: MultiAddress
+  let res = MultiAddress.init(input)
+  if res.isOk:
+    ma = res.get()
+  else:
+    warn "Invalid MultiAddress", input=input, error=res.error()
+    quit QuitFailure
+  ma

 proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
   var res: SignedPeerRecord

@@ -337,6 +346,18 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
   val = SignedPeerRecord.parseCmdArg(uri)

+proc readValue*(r: var TomlReader, val: var MultiAddress) =
+  without input =? r.readValue(string).catch, err:
+    error "invalid MultiAddress configuration value", error = err.msg
+    quit QuitFailure
+
+  let res = MultiAddress.init(input)
+  if res.isOk:
+    val = res.get()
+  else:
+    warn "Invalid MultiAddress", input=input, error=res.error()
+    quit QuitFailure
+
 proc readValue*(r: var TomlReader, val: var NBytes)
     {.upraises: [SerializationError, IOError].} =
   var value = 0'i64
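Both new conf.nim procs share one behaviour change: `MultiAddress.init` failures are logged and turned into a clean `quit QuitFailure` instead of an exception escaping from `tryGet`. A small sketch of the underlying result-based API from nim-libp2p, assuming only that the two inputs parse and fail as shown:

```nim
import pkg/libp2p/multiaddress
import pkg/stew/results

let good = MultiAddress.init("/ip4/127.0.0.1/tcp/8080")
assert good.isOk          # conf.nim proceeds with good.get()

let bad = MultiAddress.init("not a multiaddress")
assert bad.isErr          # conf.nim now warns and quits instead of raising
```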
@@ -22,14 +22,14 @@ proc start*(clock: OnChainClock) {.async.} =
     return
   clock.started = true

-  proc onBlock(blck: Block) {.async, upraises:[].} =
+  proc onBlock(blck: Block) {.upraises:[].} =
     let blockTime = initTime(blck.timestamp.truncate(int64), 0)
     let computerTime = getTime()
     clock.offset = blockTime - computerTime
     clock.newBlock.fire()

   if latestBlock =? (await clock.provider.getBlock(BlockTag.latest)):
-    await onBlock(latestBlock)
+    onBlock(latestBlock)

   clock.subscription = await clock.provider.subscribe(onBlock)
@@ -1,5 +1,6 @@
 import std/sequtils
+import std/strutils
 import std/strformat
 import std/sugar
 import pkg/chronicles
 import pkg/ethers
 import pkg/ethers/testing

@@ -36,7 +37,7 @@ proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
   let tokenAddress = await market.contract.token()
   let token = Erc20Token.new(tokenAddress, market.signer)

-  await token.approve(market.contract.address(), amount)
+  discard await token.approve(market.contract.address(), amount)

 method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
   return await market.signer.getAddress()

@@ -168,11 +169,13 @@ method canProofBeMarkedAsMissing*(
     trace "Proof can not be marked as missing", msg = e.msg
     return false

-method subscribeRequests(market: OnChainMarket,
+method subscribeRequests*(market: OnChainMarket,
                          callback: OnRequest):
                         Future[MarketSubscription] {.async.} =
   proc onEvent(event: StorageRequested) {.upraises:[].} =
-    callback(event.requestId, event.ask)
+    callback(event.requestId,
+             event.ask,
+             event.expiry)
   let subscription = await market.contract.subscribe(StorageRequested, onEvent)
   return OnChainMarketSubscription(eventSubscription: subscription)

@@ -198,10 +201,18 @@ method subscribeSlotFreed*(market: OnChainMarket,
                            callback: OnSlotFreed):
                           Future[MarketSubscription] {.async.} =
   proc onEvent(event: SlotFreed) {.upraises:[].} =
-    callback(event.slotId)
+    callback(event.requestId, event.slotIndex)
   let subscription = await market.contract.subscribe(SlotFreed, onEvent)
   return OnChainMarketSubscription(eventSubscription: subscription)

+method subscribeFulfillment(market: OnChainMarket,
+                            callback: OnFulfillment):
+                           Future[MarketSubscription] {.async.} =
+  proc onEvent(event: RequestFulfilled) {.upraises:[].} =
+    callback(event.requestId)
+  let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
+  return OnChainMarketSubscription(eventSubscription: subscription)
+
 method subscribeFulfillment(market: OnChainMarket,
                             requestId: RequestId,
                             callback: OnFulfillment):

@@ -212,6 +223,14 @@ method subscribeFulfillment(market: OnChainMarket,
   let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
   return OnChainMarketSubscription(eventSubscription: subscription)

+method subscribeRequestCancelled*(market: OnChainMarket,
+                                  callback: OnRequestCancelled):
+                                 Future[MarketSubscription] {.async.} =
+  proc onEvent(event: RequestCancelled) {.upraises:[].} =
+    callback(event.requestId)
+  let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
+  return OnChainMarketSubscription(eventSubscription: subscription)
+
 method subscribeRequestCancelled*(market: OnChainMarket,
                                   requestId: RequestId,
                                   callback: OnRequestCancelled):

@@ -222,6 +241,14 @@ method subscribeRequestCancelled*(market: OnChainMarket,
   let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
   return OnChainMarketSubscription(eventSubscription: subscription)

+method subscribeRequestFailed*(market: OnChainMarket,
+                               callback: OnRequestFailed):
+                              Future[MarketSubscription] {.async.} =
+  proc onEvent(event: RequestFailed) {.upraises:[]} =
+    callback(event.requestId)
+  let subscription = await market.contract.subscribe(RequestFailed, onEvent)
+  return OnChainMarketSubscription(eventSubscription: subscription)
+
 method subscribeRequestFailed*(market: OnChainMarket,
                                requestId: RequestId,
                                callback: OnRequestFailed):

@@ -242,3 +269,24 @@ method subscribeProofSubmission*(market: OnChainMarket,
 method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
   await subscription.eventSubscription.unsubscribe()

+method queryPastStorageRequests*(market: OnChainMarket,
+                                 blocksAgo: int):
+                                Future[seq[PastStorageRequest]] {.async.} =
+  let contract = market.contract
+  let provider = contract.provider
+
+  let head = await provider.getBlockNumber()
+  let fromBlock = BlockTag.init(head - blocksAgo.abs.u256)
+
+  let events = await contract.queryFilter(StorageRequested,
+                                          fromBlock,
+                                          BlockTag.latest)
+  return events.map(event =>
+    PastStorageRequest(
+      requestId: event.requestId,
+      ask: event.ask,
+      expiry: event.expiry
+    )
+  )
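A usage sketch for the new `queryPastStorageRequests`; the surrounding async context, a connected `market` instance, and the module's imports are assumed:

```nim
# Fetch StorageRequested events emitted in (roughly) the last 100 blocks
# and print a summary of each.
proc listRecentRequests(market: OnChainMarket) {.async.} =
  let past = await market.queryPastStorageRequests(blocksAgo = 100)
  echo "found ", past.len, " StorageRequested events"
  for request in past:
    echo "  slots: ", request.ask.slots, ", expiry: ", request.expiry
```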
@@ -18,13 +18,13 @@ type
   StorageRequested* = object of Event
     requestId*: RequestId
     ask*: StorageAsk
+    expiry*: UInt256
   SlotFilled* = object of Event
     requestId* {.indexed.}: RequestId
-    slotId*: SlotId
-    slotIndex*: UInt256
+    slotIndex* {.indexed.}: UInt256
   SlotFreed* = object of Event
-    slotId*: SlotId
+    requestId* {.indexed.}: RequestId
+    slotIndex*: UInt256
   RequestFulfilled* = object of Event
     requestId* {.indexed.}: RequestId
   RequestCancelled* = object of Event
@@ -4,6 +4,8 @@ import pkg/nimcrypto
 import pkg/ethers/fields
 import pkg/questionable/results
 import pkg/stew/byteutils
+import pkg/json_serialization
+import pkg/upraises

 export contractabi

@@ -203,3 +205,17 @@ func price*(request: StorageRequest): UInt256 =
 func size*(ask: StorageAsk): UInt256 =
   ask.slots.u256 * ask.slotSize

+proc writeValue*(
+  writer: var JsonWriter,
+  value: SlotId | RequestId) {.upraises:[IOError].} =
+
+  mixin writeValue
+  writer.writeValue value.toArray
+
+proc readValue*[T: SlotId | RequestId](
+  reader: var JsonReader,
+  value: var T) {.upraises: [SerializationError, IOError].} =
+
+  mixin readValue
+  value = T reader.readValue(T.distinctBase)
@@ -11,14 +11,12 @@ import std/algorithm

 import pkg/chronos
 import pkg/chronicles
-import pkg/libp2p
-import pkg/libp2p/routing_record
-import pkg/libp2p/signed_envelope
+import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
 import pkg/questionable
 import pkg/questionable/results
 import pkg/stew/shims/net
 import pkg/contractabi/address as ca
-import pkg/libp2pdht/discv5/protocol as discv5
+import pkg/codexdht/discv5/protocol as discv5

 import ./rng
 import ./errors
@@ -9,15 +9,20 @@

 import pkg/stew/results

 export results

 type
   CodexError* = object of CatchableError # base codex error
   CodexResult*[T] = Result[T, ref CodexError]

-template mapFailure*(
-  exp: untyped,
-  exc: typed = type CodexError
-): untyped =
+template mapFailure*[T, V, E](
+  exp: Result[T, V],
+  exc: typedesc[E],
+): Result[T, ref CatchableError] =
   ## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
   ##

-  ((exp.mapErr do (e: auto) -> ref CatchableError: (ref exc)(msg: $e)))
+  exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
+
+template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
+  mapFailure(exp, CodexError)
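The rewritten `mapFailure` is now a typed template over `Result[T, V]`, plus a one-argument overload that defaults the exception type to `CodexError`. A small usage sketch, assuming stew/results and that the module above is importable as `./errors`:

```nim
import pkg/stew/results
import ./errors   # module shown above; import path assumed

let parsed = Result[int, string].err("not a number")

# the one-argument overload wraps the string error in a ref CodexError
let mapped = parsed.mapFailure()
assert mapped.isErr and mapped.error.msg == "not a number"
```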
@@ -10,7 +10,7 @@
 import std/strutils

 import pkg/chronicles
-import pkg/libp2p
+import pkg/libp2p/cid

 func shortLog*(cid: Cid): string =
   ## Returns compact string representation of ``pid``.
@@ -26,7 +26,7 @@ import ../errors
 import ../blocktype
 import ./types

-func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
+proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
   ## Encode the manifest into a ``ManifestCodec``
   ## multicodec container (Dag-pb) for now
   ##

@@ -60,7 +60,7 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
   # ```
   #

-  let cid = !manifest.rootHash
+  let cid = ? manifest.cid
   var header = initProtoBuffer()
   header.write(1, cid.data.buffer)
   header.write(2, manifest.blockSize.uint32)

@@ -145,22 +145,31 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
   if blocksLen.int != blocks.len:
     return failure("Total blocks and length of blocks in header don't match!")

-  var
-    self = Manifest(
-      rootHash: rootHashCid.some,
-      originalBytes: originalBytes.NBytes,
-      blockSize: blockSize.NBytes,
-      blocks: blocks,
-      hcodec: (? rootHashCid.mhash.mapFailure).mcodec,
-      codec: rootHashCid.mcodec,
-      version: rootHashCid.cidver,
-      protected: pbErasureInfo.buffer.len > 0)
-
-  if self.protected:
-    self.ecK = ecK.int
-    self.ecM = ecM.int
-    self.originalCid = ? Cid.init(originalCid).mapFailure
-    self.originalLen = originalLen.int
+  let
+    self = if pbErasureInfo.buffer.len > 0:
+      Manifest.new(
+        rootHash = rootHashCid,
+        originalBytes = originalBytes.NBytes,
+        blockSize = blockSize.NBytes,
+        blocks = blocks,
+        version = rootHashCid.cidver,
+        hcodec = (? rootHashCid.mhash.mapFailure).mcodec,
+        codec = rootHashCid.mcodec,
+        ecK = ecK.int,
+        ecM = ecM.int,
+        originalCid = ? Cid.init(originalCid).mapFailure,
+        originalLen = originalLen.int
+      )
+    else:
+      Manifest.new(
+        rootHash = rootHashCid,
+        originalBytes = originalBytes.NBytes,
+        blockSize = blockSize.NBytes,
+        blocks = blocks,
+        version = rootHashCid.cidver,
+        hcodec = (? rootHashCid.mhash.mapFailure).mcodec,
+        codec = rootHashCid.mcodec
+      )

   ? self.verify()
   self.success

@@ -172,9 +181,6 @@ proc encode*(
   ## Encode a manifest using `encoder`
   ##

-  if self.rootHash.isNone:
-    ? self.makeRoot()
-
   encoder.encode(self)

 func decode*(
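Taken together with the constructor changes in the next file, encode and decode should round-trip a manifest. A sketch (a populated `manifest` value and the module's own imports are assumed):

```nim
proc roundTrip(manifest: Manifest): ?!void =
  let coder = DagPBCoder()

  without encoded =? coder.encode(manifest), err:
    return failure(err)          # ?!seq[byte] failure branch

  without decoded =? coder.decode(encoded), err:
    return failure(err)          # ?!Manifest failure branch

  assert decoded.blocks == manifest.blocks
  assert decoded.protected == manifest.protected
  success()
```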
@@ -27,6 +27,58 @@ import ./types
 export types

+type
+  Manifest* = ref object of RootObj
+    rootHash: ?Cid            # Root (tree) hash of the contained data set
+    originalBytes*: NBytes    # Exact size of the original (uploaded) file
+    blockSize: NBytes         # Size of each contained block (might not be needed if blocks are len-prefixed)
+    blocks: seq[Cid]          # Block Cid
+    version: CidVersion       # Cid version
+    hcodec: MultiCodec        # Multihash codec
+    codec: MultiCodec         # Data set codec
+    case protected: bool      # Protected datasets have erasure coded info
+    of true:
+      ecK: int                # Number of blocks to encode
+      ecM: int                # Number of resulting parity blocks
+      originalCid: Cid        # The original Cid of the dataset being erasure coded
+      originalLen: int        # The length of the original manifest
+    else:
+      discard
+
+############################################################
+# Accessors
+############################################################
+
+proc blockSize*(self: Manifest): NBytes =
+  self.blockSize
+
+proc blocks*(self: Manifest): seq[Cid] =
+  self.blocks
+
+proc version*(self: Manifest): CidVersion =
+  self.version
+
+proc hcodec*(self: Manifest): MultiCodec =
+  self.hcodec
+
+proc codec*(self: Manifest): MultiCodec =
+  self.codec
+
+proc protected*(self: Manifest): bool =
+  self.protected
+
+proc ecK*(self: Manifest): int =
+  self.ecK
+
+proc ecM*(self: Manifest): int =
+  self.ecM
+
+proc originalCid*(self: Manifest): Cid =
+  self.originalCid
+
+proc originalLen*(self: Manifest): int =
+  self.originalLen
+
 ############################################################
 # Operations on block list
 ############################################################

@@ -49,7 +101,8 @@ func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) =
   self.blocks[self.len - i.int] = item

 func isManifest*(cid: Cid): ?!bool =
-  ($(?cid.contentType().mapFailure) in ManifestContainers).success
+  let res = ?cid.contentType().mapFailure(CodexError)
+  ($(res) in ManifestContainers).success

 func isManifest*(mc: MultiCodec): ?!bool =
   ($mc in ManifestContainers).success

@@ -137,11 +190,8 @@ proc makeRoot*(self: Manifest): ?!void =
     stack.add(mh)

     if stack.len == 1:
-      let cid = ? Cid.init(
-        self.version,
-        self.codec,
-        (? EmptyDigests[self.version][self.hcodec].catch))
-        .mapFailure
+      let digest = ? EmptyDigests[self.version][self.hcodec].catch
+      let cid = ? Cid.init(self.version, self.codec, digest).mapFailure

       self.rootHash = cid.some

@@ -173,8 +223,8 @@ proc new*(
   ## Create a manifest using an array of `Cid`s
   ##

-  if hcodec notin EmptyDigests[version]:
-    return failure("Unsupported manifest hash codec!")
+  # if hcodec notin EmptyDigests[version]:
+  #   return failure("Unsupported manifest hash codec!")

   T(
     blocks: @blocks,

@@ -231,5 +281,55 @@ proc new*(
     decoder = ManifestContainers[$DagPBCodec]
   ): ?!Manifest =
   ## Create a manifest instance from given data
   ##
   Manifest.decode(data, decoder)

+proc new*(
+  T: type Manifest,
+  rootHash: Cid,
+  originalBytes: NBytes,
+  blockSize: NBytes,
+  blocks: seq[Cid],
+  version: CidVersion,
+  hcodec: MultiCodec,
+  codec: MultiCodec,
+  ecK: int,
+  ecM: int,
+  originalCid: Cid,
+  originalLen: int
+): Manifest =
+  Manifest(
+    rootHash: rootHash.some,
+    originalBytes: originalBytes,
+    blockSize: blockSize,
+    blocks: blocks,
+    version: version,
+    hcodec: hcodec,
+    codec: codec,
+    protected: true,
+    ecK: ecK,
+    ecM: ecM,
+    originalCid: originalCid,
+    originalLen: originalLen
+  )
+
+proc new*(
+  T: type Manifest,
+  rootHash: Cid,
+  originalBytes: NBytes,
+  blockSize: NBytes,
+  blocks: seq[Cid],
+  version: CidVersion,
+  hcodec: MultiCodec,
+  codec: MultiCodec
+): Manifest =
+  Manifest(
+    rootHash: rootHash.some,
+    originalBytes: originalBytes,
+    blockSize: blockSize,
+    blocks: blocks,
+    version: version,
+    hcodec: hcodec,
+    codec: codec,
+    protected: false,
+  )
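With the fields now private, a protected manifest can only be produced through the new twelve-parameter `Manifest.new` overload and read back through the accessors. A construction sketch; `rootCid` and the four block CIDs are hypothetical placeholders, and the module's imports are assumed:

```nim
let protectedManifest = Manifest.new(
  rootHash      = rootCid,
  originalBytes = 1024.NBytes,
  blockSize     = 256.NBytes,
  blocks        = @[blk1, blk2, blk3, blk4],
  version       = CIDv1,
  hcodec        = multiCodec("sha2-256"),
  codec         = multiCodec("dag-pb"),
  ecK           = 2,
  ecM           = 2,
  originalCid   = rootCid,
  originalLen   = 4
)

assert protectedManifest.protected
assert protectedManifest.ecK == 2   # read through the accessor, not the field
```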
@@ -28,21 +28,3 @@ const
   ManifestContainers* = {
     $DagPBCodec: DagPBCoder()
   }.toTable
-
-type
-  Manifest* = ref object of RootObj
-    rootHash*: ?Cid            # Root (tree) hash of the contained data set
-    originalBytes*: NBytes     # Exact size of the original (uploaded) file
-    blockSize*: NBytes         # Size of each contained block (might not be needed if blocks are len-prefixed)
-    blocks*: seq[Cid]          # Block Cid
-    version*: CidVersion       # Cid version
-    hcodec*: MultiCodec        # Multihash codec
-    codec*: MultiCodec         # Data set codec
-    case protected*: bool      # Protected datasets have erasure coded info
-    of true:
-      ecK*: int                # Number of blocks to encode
-      ecM*: int                # Number of resulting parity blocks
-      originalCid*: Cid        # The original Cid of the dataset being erasure coded
-      originalLen*: int        # The length of the original manifest
-    else:
-      discard
@@ -15,13 +15,19 @@ export periods
 type
   Market* = ref object of RootObj
   Subscription* = ref object of RootObj
-  OnRequest* = proc(id: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].}
+  OnRequest* = proc(id: RequestId,
+                    ask: StorageAsk,
+                    expiry: UInt256) {.gcsafe, upraises:[].}
   OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
   OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
-  OnSlotFreed* = proc(slotId: SlotId) {.gcsafe, upraises: [].}
+  OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
   OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
   OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
   OnProofSubmitted* = proc(id: SlotId, proof: seq[byte]) {.gcsafe, upraises:[].}
+  PastStorageRequest* = object
+    requestId*: RequestId
+    ask*: StorageAsk
+    expiry*: UInt256

 method getSigner*(market: Market): Future[Address] {.base, async.} =
   raiseAssert("not implemented")

@@ -112,6 +118,11 @@ method canProofBeMarkedAsMissing*(market: Market,
                                   period: Period): Future[bool] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeFulfillment*(market: Market,
+                             callback: OnFulfillment):
+                            Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeFulfillment*(market: Market,
                              requestId: RequestId,
                              callback: OnFulfillment):

@@ -135,12 +146,22 @@ method subscribeSlotFreed*(market: Market,
                           Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeRequestCancelled*(market: Market,
+                                  callback: OnRequestCancelled):
+                                 Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeRequestCancelled*(market: Market,
                                   requestId: RequestId,
                                   callback: OnRequestCancelled):
                                  Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

+method subscribeRequestFailed*(market: Market,
+                               callback: OnRequestFailed):
+                              Future[Subscription] {.base, async.} =
+  raiseAssert("not implemented")
+
 method subscribeRequestFailed*(market: Market,
                                requestId: RequestId,
                                callback: OnRequestFailed):

@@ -154,3 +175,8 @@ method subscribeProofSubmission*(market: Market,
 method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
   raiseAssert("not implemented")

+method queryPastStorageRequests*(market: Market,
+                                 blocksAgo: int):
+                                Future[seq[PastStorageRequest]] {.base, async.} =
+  raiseAssert("not implemented")
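The callback shapes changed here: `OnRequest` now also receives the request's expiry, and `OnSlotFreed` receives `(requestId, slotIndex)` instead of a `SlotId`. A subscription sketch against the abstract interface; the concrete `market` instance and async runtime are assumed:

```nim
proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} =
  echo "slot freed at index ", slotIndex

proc watchSlots(market: Market) {.async.} =
  let subscription = await market.subscribeSlotFreed(onSlotFreed)
  # ... react to events for a while ...
  await subscription.unsubscribe()
```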
codex/merkletree/merkletree.nim (new file): 189 lines
@ -0,0 +1,189 @@
|
||||
## Nim-Codex
|
||||
## Copyright (c) 2022 Status Research & Development GmbH
|
||||
## Licensed under either of
|
||||
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
## at your option.
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
import std/sequtils
|
||||
import std/math
|
||||
import std/bitops
|
||||
import std/sugar
|
||||
|
||||
import pkg/libp2p
|
||||
import pkg/stew/byteutils
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
|
||||
type
|
||||
MerkleHash* = MultiHash
|
||||
MerkleTree* = object
|
||||
leavesCount: int
|
||||
nodes: seq[MerkleHash]
|
||||
MerkleProof* = object
|
||||
index: int
|
||||
path: seq[MerkleHash]
|
||||
|
||||
# Tree constructed from leaves H0..H2 is
|
||||
# H5=H(H3 & H4)
|
||||
# / \
|
||||
# H3=H(H0 & H1) H4=H(H2 & H2)
|
||||
# / \ /
|
||||
# H0=H(A) H1=H(B) H2=H(C)
|
||||
# | | |
|
||||
# A B C
|
||||
#
|
||||
# Memory layout is [H0, H1, H2, H3, H4, H5]
|
||||
#
|
||||
# Proofs of inclusion are
|
||||
# - [H1, H4] for A
|
||||
# - [H0, H4] for B
|
||||
# - [H2, H3] for C
|
||||
|
||||
|
||||
func computeTreeHeight(leavesCount: int): int =
|
||||
if isPowerOfTwo(leavesCount):
|
||||
fastLog2(leavesCount) + 1
|
||||
else:
|
||||
fastLog2(leavesCount) + 2
|
||||
|
||||
func getLowHigh(leavesCount, level: int): (int, int) =
|
||||
var width = leavesCount
|
||||
var low = 0
|
||||
for _ in 0..<level:
|
||||
low += width
|
||||
width = (width + 1) div 2
|
||||
|
||||
(low, low + width - 1)
|
||||
|
||||
func getLowHigh(self: MerkleTree, level: int): (int, int) =
|
||||
getLowHigh(self.leavesCount, level)
|
||||
|
||||
func computeTotalSize(leavesCount: int): int =
|
||||
let height = computeTreeHeight(leavesCount)
|
||||
getLowHigh(leavesCount, height - 1)[1] + 1
|
||||
|
||||
proc getWidth(self: MerkleTree, level: int): int =
|
||||
let (low, high) = self.getLowHigh(level)
|
||||
high - low + 1
|
||||
|
||||
func getChildren(self: MerkleTree, level, i: int): (MerkleHash, MerkleHash) =
|
||||
let (low, high) = self.getLowHigh(level - 1)
|
||||
let leftIdx = low + 2 * i
|
||||
let rightIdx = min(leftIdx + 1, high)
|
||||
|
||||
(self.nodes[leftIdx], self.nodes[rightIdx])
|
||||
|
||||
func getSibling(self: MerkleTree, level, i: int): MerkleHash =
|
||||
let (low, high) = self.getLowHigh(level)
|
||||
if i mod 2 == 0:
|
||||
self.nodes[min(low + i + 1, high)]
|
||||
else:
|
||||
self.nodes[low + i - 1]
|
||||
|
||||
proc setNode(self: var MerkleTree, level, i: int, value: MerkleHash): void =
|
||||
let (low, _) = self.getLowHigh(level)
|
||||
self.nodes[low + i] = value
|
||||
|
||||
proc root*(self: MerkleTree): MerkleHash =
|
||||
self.nodes[^1]
|
||||
|
||||
proc len*(self: MerkleTree): int =
|
||||
self.nodes.len
|
||||
|
||||
proc leaves*(self: MerkleTree): seq[MerkleHash] =
|
||||
self.nodes[0..<self.leavesCount]
|
||||
|
||||
proc nodes*(self: MerkleTree): seq[MerkleHash] =
|
||||
self.nodes
|
||||
|
||||
proc height*(self: MerkleTree): int =
|
||||
computeTreeHeight(self.leavesCount)
|
||||
|
||||
proc `$`*(self: MerkleTree): string =
|
||||
result &= "leavesCount: " & $self.leavesCount
|
||||
result &= "\nnodes: " & $self.nodes
|
||||
|
||||
proc getProof*(self: MerkleTree, index: int): ?!MerkleProof =
|
||||
if index >= self.leavesCount or index < 0:
|
||||
return failure("Index " & $index & " out of range [0.." & $self.leaves.high & "]" )
|
||||
|
||||
var path = newSeq[MerkleHash](self.height - 1)
|
||||
for level in 0..<path.len:
|
||||
let i = index div (1 shl level)
|
||||
path[level] = self.getSibling(level, i)
|
||||
|
||||
success(MerkleProof(index: index, path: path))
|
||||
|
||||
proc initTreeFromLeaves(leaves: openArray[MerkleHash]): ?!MerkleTree =
|
||||
without mcodec =? leaves.?[0].?mcodec and
|
||||
digestSize =? leaves.?[0].?size:
|
||||
return failure("At least one leaf is required")
|
||||
|
||||
if not leaves.allIt(it.mcodec == mcodec):
|
||||
return failure("All leaves must use the same codec")
|
||||
|
||||
let totalSize = computeTotalSize(leaves.len)
|
||||
var tree = MerkleTree(leavesCount: leaves.len, nodes: newSeq[MerkleHash](totalSize))
|
||||
|
||||
var buf = newSeq[byte](digestSize * 2)
|
||||
proc combine(l, r: MerkleHash): ?!MerkleHash =
|
||||
copyMem(addr buf[0], unsafeAddr l.data.buffer[0], digestSize)
|
||||
copyMem(addr buf[digestSize], unsafeAddr r.data.buffer[0], digestSize)
|
||||
|
||||
MultiHash.digest($mcodec, buf).mapErr(
|
||||
c => newException(CatchableError, "Error calculating hash using codec " & $mcodec & ": " & $c)
|
||||
)
|
||||
|
||||
# copy leaves
|
||||
for i in 0..<tree.getWidth(0):
|
||||
tree.setNode(0, i, leaves[i])
|
||||
|
||||
# calculate intermediate nodes
|
||||
for level in 1..<tree.height:
|
||||
for i in 0..<tree.getWidth(level):
|
||||
let (left, right) = tree.getChildren(level, i)
|
||||
|
||||
without mhash =? combine(left, right), error:
|
||||
return failure(error)
|
||||
tree.setNode(level, i, mhash)
|
||||
|
||||
success(tree)
|
||||
|
||||
func init*(
|
||||
T: type MerkleTree,
|
||||
root: MerkleHash,
|
||||
leavesCount: int
|
||||
): MerkleTree =
|
||||
let totalSize = computeTotalSize(leavesCount)
|
||||
var nodes = newSeq[MerkleHash](totalSize)
|
||||
nodes[^1] = root
|
||||
MerkleTree(nodes: nodes, leavesCount: leavesCount)
|
||||
|
||||
proc init*(
|
||||
T: type MerkleTree,
|
||||
leaves: openArray[MerkleHash]
|
||||
): ?!MerkleTree =
|
||||
initTreeFromLeaves(leaves)
|
||||
|
||||
proc index*(self: MerkleProof): int =
|
||||
self.index
|
||||
|
||||
proc path*(self: MerkleProof): seq[MerkleHash] =
|
||||
self.path
|
||||
|
||||
proc `$`*(self: MerkleProof): string =
|
||||
result &= "index: " & $self.index
|
||||
result &= "\npath: " & $self.path
|
||||
|
||||
func `==`*(a, b: MerkleProof): bool =
|
||||
(a.index == b.index) and (a.path == b.path)
|
||||
|
||||
proc init*(
|
||||
T: type MerkleProof,
|
||||
index: int,
|
||||
path: seq[MerkleHash]
|
||||
): MerkleProof =
|
||||
MerkleProof(index: index, path: path)
|
||||
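A usage sketch for the new module, mirroring the three-leaf example in its layout comment; sha2-256 via libp2p's MultiHash is assumed:

```nim
import std/sequtils
import pkg/libp2p
import pkg/stew/byteutils
import pkg/questionable/results

let
  leaves = @["A", "B", "C"].mapIt(
    MultiHash.digest("sha2-256", it.toBytes).tryGet())
  tree = MerkleTree.init(leaves).tryGet()

assert tree.height == 3              # levels H0..H2, H3..H4, H5
assert tree.len == 6                 # [H0, H1, H2, H3, H4, H5]

let proof = tree.getProof(0).tryGet()
assert proof.path.len == 2           # [H1, H4], as in the comment above
```

With three leaves the last node on each level is paired with itself (H4 = H(H2 & H2)), which is why `getSibling` clamps the right index to `high`.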
@@ -11,6 +11,7 @@ const
   # Namespaces
   CodexMetaNamespace* = "meta" # meta info stored here
   CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
+  CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
   CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
   CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
   CodexBlocksTtlNamespace* = # Cid TTL
codex/node.nim: 126 changes
@ -16,7 +16,9 @@ import pkg/questionable
import pkg/questionable/results
import pkg/chronicles
import pkg/chronos
import pkg/libp2p

import pkg/libp2p/switch
import pkg/libp2p/stream/bufferstream

# TODO: remove once exported by libp2p
import pkg/libp2p/routing_record
@ -60,23 +62,21 @@ type

proc findPeer*(
  node: CodexNodeRef,
  peerId: PeerId
): Future[?PeerRecord] {.async.} =
  peerId: PeerId): Future[?PeerRecord] {.async.} =
  ## Find peer using the discovery service from the given CodexNode
  ##
  ##
  return await node.discovery.findPeer(peerId)

proc connect*(
  node: CodexNodeRef,
  peerId: PeerId,
  addrs: seq[MultiAddress]
  node: CodexNodeRef,
  peerId: PeerId,
  addrs: seq[MultiAddress]
): Future[void] =
  node.switch.connect(peerId, addrs)

proc fetchManifest*(
  node: CodexNodeRef,
  cid: Cid
): Future[?!Manifest] {.async.} =
  node: CodexNodeRef,
  cid: Cid): Future[?!Manifest] {.async.} =
  ## Fetch and decode a manifest block
  ##

@ -100,11 +100,10 @@ proc fetchManifest*(
  return manifest.success

proc fetchBatched*(
  node: CodexNodeRef,
  manifest: Manifest,
  batchSize = FetchBatch,
  onBatch: BatchProc = nil
): Future[?!void] {.async, gcsafe.} =
  node: CodexNodeRef,
  manifest: Manifest,
  batchSize = FetchBatch,
  onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} =
  ## Fetch manifest in batches of `batchSize`
  ##

@ -130,9 +129,8 @@ proc fetchBatched*(
  return success()

proc retrieve*(
  node: CodexNodeRef,
  cid: Cid
): Future[?!LPStream] {.async.} =
  node: CodexNodeRef,
  cid: Cid): Future[?!LPStream] {.async.} =
  ## Retrieve by Cid a single block or an entire dataset described by manifest
  ##

@ -147,47 +145,35 @@ proc retrieve*(
        trace "Unable to erasure decode manifest", cid, exc = error.msg
      except CatchableError as exc:
        trace "Exception decoding manifest", cid, exc = exc.msg
      #

    asyncSpawn erasureJob()
    # else:
    #   # Prefetch the entire dataset into the local store
    #   proc prefetchBlocks() {.async, raises: [Defect].} =
    #     try:
    #       discard await node.fetchBatched(manifest)
    #     except CatchableError as exc:
    #       trace "Exception prefetching blocks", exc = exc.msg
    #   #
    #   # asyncSpawn prefetchBlocks() - temporarily commented out
    #

    # Retrieve all blocks of the dataset sequentially from the local store or network
    trace "Creating store stream for manifest", cid
    return LPStream(StoreStream.new(node.blockStore, manifest, pad = false)).success
    LPStream(StoreStream.new(node.blockStore, manifest, pad = false)).success
  else:
    let
      stream = BufferStream.new()

    let
      stream = BufferStream.new()
    without blk =? (await node.blockStore.getBlock(cid)), err:
      return failure(err)

    without blk =? (await node.blockStore.getBlock(cid)), err:
      return failure(err)
    proc streamOneBlock(): Future[void] {.async.} =
      try:
        await stream.pushData(blk.data)
      except CatchableError as exc:
        trace "Unable to send block", cid, exc = exc.msg
        discard
      finally:
        await stream.pushEof()

    proc streamOneBlock(): Future[void] {.async.} =
      try:
        await stream.pushData(blk.data)
      except CatchableError as exc:
        trace "Unable to send block", cid, exc = exc.msg
        discard
      finally:
        await stream.pushEof()

    asyncSpawn streamOneBlock()
    return LPStream(stream).success()

  return failure("Unable to retrieve Cid!")
    asyncSpawn streamOneBlock()
    LPStream(stream).success()

proc store*(
  self: CodexNodeRef,
  stream: LPStream,
  blockSize = DefaultBlockSize
): Future[?!Cid] {.async.} =
  self: CodexNodeRef,
  stream: LPStream,
  blockSize = DefaultBlockSize): Future[?!Cid] {.async.} =
  ## Save stream contents as dataset with given blockSize
  ## to node's BlockStore, and return Cid of its manifest
  ##
@ -221,7 +207,7 @@ proc store*(
  await stream.close()

  # Generate manifest
  blockManifest.originalBytes = NBytes chunker.offset # store the exact file size
  blockManifest.originalBytes = NBytes(chunker.offset) # store the exact file size
  without data =? blockManifest.encode():
    return failure(
      newException(CodexError, "Could not generate dataset manifest!"))
@ -249,16 +235,15 @@ proc store*(
  return manifest.cid.success

proc requestStorage*(
  self: CodexNodeRef,
  cid: Cid,
  duration: UInt256,
  proofProbability: UInt256,
  nodes: uint,
  tolerance: uint,
  reward: UInt256,
  collateral: UInt256,
  expiry = UInt256.none
): Future[?!PurchaseId] {.async.} =
  self: CodexNodeRef,
  cid: Cid,
  duration: UInt256,
  proofProbability: UInt256,
  nodes: uint,
  tolerance: uint,
  reward: UInt256,
  collateral: UInt256,
  expiry = UInt256.none): Future[?!PurchaseId] {.async.} =
  ## Initiate a request for storage sequence, this might
  ## be a multistep procedure.
  ##
@ -323,16 +308,15 @@ proc requestStorage*(
  return success purchase.id

proc new*(
  T: type CodexNodeRef,
  switch: Switch,
  store: BlockStore,
  engine: BlockExcEngine,
  erasure: Erasure,
  discovery: Discovery,
  contracts = Contracts.default
): CodexNodeRef =
  T: type CodexNodeRef,
  switch: Switch,
  store: BlockStore,
  engine: BlockExcEngine,
  erasure: Erasure,
  discovery: Discovery,
  contracts = Contracts.default): CodexNodeRef =
  ## Create new instance of a Codex node, call `start` to run it
  ##
  ##
  CodexNodeRef(
    switch: switch,
    blockStore: store,

@ -31,13 +31,14 @@ func new*(
  clock: Clock
): Purchase =
  ## create a new instance of a Purchase
  ##
  Purchase(
    future: Future[void].new(),
    requestId: requestId,
    market: market,
    clock: clock
  )
  ##
  var purchase = Purchase.new()
  purchase.future = Future[void].new()
  purchase.requestId = requestId
  purchase.market = market
  purchase.clock = clock

  return purchase

func new*(
  _: type Purchase,

@ -1,13 +1,17 @@
import pkg/metrics
import ../statemachine
import ./errorhandling
import ./error

declareCounter(codexPurchasesCancelled, "codex purchases cancelled")

type PurchaseCancelled* = ref object of ErrorHandlingState

method `$`*(state: PurchaseCancelled): string =
  "cancelled"

method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} =
  codexPurchasesCancelled.inc()
  let purchase = Purchase(machine)
  await purchase.market.withdrawFunds(purchase.requestId)
  let error = newException(Timeout, "Purchase cancelled due to timeout")

@ -1,5 +1,8 @@
import pkg/metrics
import ../statemachine

declareCounter(codexPurchasesError, "codex purchases error")

type PurchaseErrored* = ref object of PurchaseState
  error*: ref CatchableError

@ -7,5 +10,6 @@ method `$`*(state: PurchaseErrored): string =
  "errored"

method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} =
  codexPurchasesError.inc()
  let purchase = Purchase(machine)
  purchase.future.fail(state.error)

@ -1,6 +1,9 @@
import pkg/metrics
import ../statemachine
import ./error

declareCounter(codexPurchasesFailed, "codex purchases failed")

type
  PurchaseFailed* = ref object of PurchaseState

@ -8,5 +11,6 @@ method `$`*(state: PurchaseFailed): string =
  "failed"

method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
  codexPurchasesFailed.inc()
  let error = newException(PurchaseError, "Purchase failed")
  return some State(PurchaseErrored(error: error))

@ -1,10 +1,14 @@
import pkg/metrics
import ../statemachine

declareCounter(codexPurchasesFinished, "codex purchases finished")

type PurchaseFinished* = ref object of PurchaseState

method `$`*(state: PurchaseFinished): string =
  "finished"

method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} =
  codexPurchasesFinished.inc()
  let purchase = Purchase(machine)
  purchase.future.complete()

@ -1,13 +1,17 @@
import pkg/metrics
import ../statemachine
import ./errorhandling
import ./submitted

declareCounter(codexPurchasesPending, "codex purchases pending")

type PurchasePending* = ref object of ErrorHandlingState

method `$`*(state: PurchasePending): string =
  "pending"

method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} =
  codexPurchasesPending.inc()
  let purchase = Purchase(machine)
  let request = !purchase.request
  await purchase.market.requestStorage(request)

@ -1,14 +1,18 @@
import pkg/metrics
import ../statemachine
import ./errorhandling
import ./finished
import ./failed

declareCounter(codexPurchasesStarted, "codex purchases started")

type PurchaseStarted* = ref object of ErrorHandlingState

method `$`*(state: PurchaseStarted): string =
  "started"

method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} =
  codexPurchasesStarted.inc()
  let purchase = Purchase(machine)

  let clock = purchase.clock

@ -1,14 +1,18 @@
import pkg/metrics
import ../statemachine
import ./errorhandling
import ./started
import ./cancelled

declareCounter(codexPurchasesSubmitted, "codex purchases submitted")

type PurchaseSubmitted* = ref object of ErrorHandlingState

method `$`*(state: PurchaseSubmitted): string =
  "submitted"

method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} =
  codexPurchasesSubmitted.inc()
  let purchase = Purchase(machine)
  let request = !purchase.request
  let market = purchase.market

@ -1,3 +1,4 @@
import pkg/metrics
import ../statemachine
import ./errorhandling
import ./submitted
@ -6,12 +7,15 @@ import ./cancelled
import ./finished
import ./failed

declareCounter(codexPurchasesUnknown, "codex purchases unknown")

type PurchaseUnknown* = ref object of ErrorHandlingState

method `$`*(state: PurchaseUnknown): string =
  "unknown"

method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} =
  codexPurchasesUnknown.inc()
  let purchase = Purchase(machine)
  if (request =? await purchase.market.getRequest(purchase.requestId)) and
     (requestState =? await purchase.market.requestState(purchase.requestId)):

@ -20,15 +20,16 @@ import pkg/chronicles
import pkg/chronos
import pkg/presto
import pkg/libp2p
import pkg/metrics
import pkg/stew/base10
import pkg/stew/byteutils
import pkg/confutils

import pkg/libp2p
import pkg/libp2p/routing_record
import pkg/libp2pdht/discv5/spr as spr
import pkg/libp2pdht/discv5/routing_table as rt
import pkg/libp2pdht/discv5/node as dn
import pkg/codexdht/discv5/spr as spr
import pkg/codexdht/discv5/routing_table as rt
import pkg/codexdht/discv5/node as dn

import ../node
import ../blocktype
@ -42,6 +43,9 @@ import ./json
logScope:
  topics = "codex restapi"

declareCounter(codexApiUploads, "codex API uploads")
declareCounter(codexApiDownloads, "codex API downloads")

proc validate(
  pattern: string,
  value: string): int
@ -164,6 +168,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
          trace "Sending chunk", size = buff.len
          await resp.sendChunk(addr buff[0], buff.len)
        await resp.finish()
        codexApiDownloads.inc()
      except CatchableError as exc:
        trace "Exception streaming blocks", exc = exc.msg
        return RestApiResponse.error(Http500)
@ -238,6 +243,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
        trace "Error uploading file", exc = error.msg
        return RestApiResponse.error(Http500, error.msg)

      codexApiUploads.inc()
      trace "Uploaded file", cid
      return RestApiResponse.response($cid)
    except CancelledError:

343
codex/sales.nim
@ -1,20 +1,24 @@
import std/sequtils
import std/sugar
import std/tables
import pkg/questionable
import pkg/upraises
import pkg/stint
import pkg/chronicles
import pkg/datastore
import ./rng
import ./market
import ./clock
import ./proving
import ./stores
import ./contracts/requests
import ./contracts/marketplace
import ./sales/salescontext
import ./sales/salesagent
import ./sales/statemachine
import ./sales/states/downloading
import ./sales/slotqueue
import ./sales/states/preparing
import ./sales/states/unknown
import ./utils/then
import ./utils/trackedfutures

## Sales holds a list of available storage that it may sell.
##
@ -38,13 +42,15 @@ export stint
export reservations

logScope:
  topics = "sales"
  topics = "sales marketplace"

type
  Sales* = ref object
    context*: SalesContext
    subscription*: ?market.Subscription
    agents*: seq[SalesAgent]
    running: bool
    subscriptions: seq[market.Subscription]
    trackedFutures: TrackedFutures

proc `onStore=`*(sales: Sales, onStore: OnStore) =
  sales.context.onStore = some onStore
@ -67,37 +73,47 @@ func new*(_: type Sales,
          proving: Proving,
          repo: RepoStore): Sales =

  Sales(context: SalesContext(
    market: market,
    clock: clock,
    proving: proving,
    reservations: Reservations.new(repo)
  ))
  let reservations = Reservations.new(repo)
  Sales(
    context: SalesContext(
      market: market,
      clock: clock,
      proving: proving,
      reservations: reservations,
      slotQueue: SlotQueue.new(reservations)
    ),
    trackedFutures: TrackedFutures.new(),
    subscriptions: @[]
  )

proc randomSlotIndex(numSlots: uint64): UInt256 =
  let rng = Rng.instance
  let slotIndex = rng.rand(numSlots - 1)
  return slotIndex.u256
proc remove(sales: Sales, agent: SalesAgent) {.async.} =
  await agent.stop()
  if sales.running:
    sales.agents.keepItIf(it != agent)

proc handleRequest(sales: Sales,
                   requestId: RequestId,
                   ask: StorageAsk) =
proc cleanUp(sales: Sales,
             agent: SalesAgent,
             processing: Future[void]) {.async.} =
  await sales.remove(agent)
  # signal back to the slot queue to cycle a worker
  if not processing.isNil and not processing.finished():
    processing.complete()

  debug "handling storage requested",
    slots = ask.slots, slotSize = ask.slotSize, duration = ask.duration,
    reward = ask.reward, maxSlotLoss = ask.maxSlotLoss
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
  debug "processing slot from queue", requestId = $item.requestId,
    slot = item.slotIndex

  # TODO: check if random slot is actually available (not already filled)
  let slotIndex = randomSlotIndex(ask.slots)
  let agent = newSalesAgent(
    sales.context,
    requestId,
    slotIndex,
    item.requestId,
    item.slotIndex.u256,
    none StorageRequest
  )
  agent.context.onIgnored = proc {.gcsafe, upraises:[].} =
    sales.agents.keepItIf(it != agent)
  agent.start(SaleDownloading())

  agent.context.onCleanUp = proc {.async.} =
    await sales.cleanUp(agent, done)

  agent.start(SalePreparing())
  sales.agents.add agent

proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} =
@ -105,6 +121,7 @@ proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} =
  let slotIds = await market.mySlots()
  var slots: seq[Slot] = @[]

  info "Loading active slots", slotsCount = len(slots)
  for slotId in slotIds:
    if slot =? (await market.getActiveSlot(slotId)):
      slots.add slot
@ -120,27 +137,273 @@ proc load*(sales: Sales) {.async.} =
      slot.request.id,
      slot.slotIndex,
      some slot.request)

    agent.context.onCleanUp = proc {.async.} = await sales.remove(agent)

    agent.start(SaleUnknown())
    sales.agents.add agent

proc start*(sales: Sales) {.async.} =
  doAssert sales.subscription.isNone, "Sales already started"
proc onReservationAdded(sales: Sales, availability: Availability) {.async.} =
  ## Query last 256 blocks for new requests, adding them to the queue. `push`
  ## checks for availability before adding to the queue. If processed, the
  ## sales agent will check if the slot is free.
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onRequest(requestId: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].} =
    sales.handleRequest(requestId, ask)
  logScope:
    topics = "sales onReservationAdded callback"

  trace "reservation added, querying past storage requests to add to queue"

  try:
    sales.subscription = some await sales.context.market.subscribeRequests(onRequest)
    let events = await market.queryPastStorageRequests(256)
    let requests = events.map(event =>
      SlotQueueItem.init(event.requestId, event.ask, event.expiry)
    )

    trace "found past storage requested events to add to queue",
      events = events.len

    for slots in requests:
      for slot in slots:
        if err =? (await queue.push(slot)).errorOption:
          # continue on error
          if err of QueueNotRunningError:
            warn "cannot push items to queue, queue is not running"
          elif err of NoMatchingAvailabilityError:
            info "slot in queue had no matching availabilities, ignoring"
          elif err of SlotsOutOfRangeError:
            warn "Too many slots, cannot add to queue"
          elif err of SlotQueueItemExistsError:
            trace "item already exists, ignoring"
            discard
          else: raise err

  except CatchableError as e:
    error "Unable to start sales", msg = e.msg
    warn "Error adding request to SlotQueue", error = e.msg
    discard

proc onStorageRequested(sales: Sales,
                        requestId: RequestId,
                        ask: StorageAsk,
                        expiry: UInt256) =

  logScope:
    topics = "sales onStorageRequested"
    requestId
    slots = ask.slots
    expiry

  let slotQueue = sales.context.slotQueue

  trace "storage requested, adding slots to queue"

  without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
    if err of SlotsOutOfRangeError:
      warn "Too many slots, cannot add to queue"
    else:
      warn "Failed to create slot queue items from request", error = err.msg

  for item in items:
    # continue on failure
    slotQueue.push(item)
      .track(sales)
      .catch(proc(err: ref CatchableError) =
        if err of NoMatchingAvailabilityError:
          info "slot in queue had no matching availabilities, ignoring"
        elif err of SlotQueueItemExistsError:
          error "Failed to push item to queue because it already exists"
        elif err of QueueNotRunningError:
          warn "Failed to push item to queue because queue is not running"
        else:
          warn "Error adding request to SlotQueue", error = err.msg
      )

proc onSlotFreed(sales: Sales,
                 requestId: RequestId,
                 slotIndex: UInt256) =

  logScope:
    topics = "sales onSlotFreed"
    requestId
    slotIndex

  trace "slot freed, adding to queue"

  proc addSlotToQueue() {.async.} =
    let context = sales.context
    let market = context.market
    let queue = context.slotQueue

    # first attempt to populate request using existing slot metadata in queue
    without var found =? queue.populateItem(requestId,
                                            slotIndex.truncate(uint16)):
      trace "no existing request metadata, getting request info from contract"
      # if there's no existing slot for that request, retrieve the request
      # from the contract.
      without request =? await market.getRequest(requestId):
        error "unknown request in contract"
        return

      found = SlotQueueItem.init(request, slotIndex.truncate(uint16))

    if err =? (await queue.push(found)).errorOption:
      raise err

  addSlotToQueue()
    .track(sales)
    .catch(proc(err: ref CatchableError) =
      if err of NoMatchingAvailabilityError:
        info "slot in queue had no matching availabilities, ignoring"
      elif err of SlotQueueItemExistsError:
        error "Failed to push item to queue because it already exists"
      elif err of QueueNotRunningError:
        warn "Failed to push item to queue because queue is not running"
      else:
        warn "Error adding request to SlotQueue", error = err.msg
    )

proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onStorageRequested(requestId: RequestId,
                          ask: StorageAsk,
                          expiry: UInt256) =
    sales.onStorageRequested(requestId, ask, expiry)

  try:
    let sub = await market.subscribeRequests(onStorageRequested)
    sales.subscriptions.add(sub)
  except CatchableError as e:
    error "Unable to subscribe to storage request events", msg = e.msg

proc subscribeCancellation(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onCancelled(requestId: RequestId) =
    trace "request cancelled, removing all request slots from queue"
    queue.delete(requestId)

  try:
    let sub = await market.subscribeRequestCancelled(onCancelled)
    sales.subscriptions.add(sub)
  except CatchableError as e:
    error "Unable to subscribe to cancellation events", msg = e.msg

proc subscribeFulfilled*(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onFulfilled(requestId: RequestId) =
    trace "request fulfilled, removing all request slots from queue"
    queue.delete(requestId)

    for agent in sales.agents:
      agent.onFulfilled(requestId)

  try:
    let sub = await market.subscribeFulfillment(onFulfilled)
    sales.subscriptions.add(sub)
  except CatchableError as e:
    error "Unable to subscribe to storage fulfilled events", msg = e.msg

proc subscribeFailure(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onFailed(requestId: RequestId) =
    trace "request failed, removing all request slots from queue"
    queue.delete(requestId)

    for agent in sales.agents:
      agent.onFailed(requestId)

  try:
    let sub = await market.subscribeRequestFailed(onFailed)
    sales.subscriptions.add(sub)
  except CatchableError as e:
    error "Unable to subscribe to storage failure events", msg = e.msg

proc subscribeSlotFilled(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
    trace "slot filled, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.truncate(uint16))

    for agent in sales.agents:
      agent.onSlotFilled(requestId, slotIndex)

  try:
    let sub = await market.subscribeSlotFilled(onSlotFilled)
    sales.subscriptions.add(sub)
  except CatchableError as e:
    error "Unable to subscribe to slot filled events", msg = e.msg

proc subscribeSlotFreed(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) =
    sales.onSlotFreed(requestId, slotIndex)

  try:
    let sub = await market.subscribeSlotFreed(onSlotFreed)
    sales.subscriptions.add(sub)
  except CatchableError as e:
    error "Unable to subscribe to slot freed events", msg = e.msg

proc startSlotQueue(sales: Sales) {.async.} =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

  slotQueue.onProcessSlot =
    proc(item: SlotQueueItem, done: Future[void]) {.async.} =
      sales.processSlot(item, done)

  asyncSpawn slotQueue.start()

  reservations.onReservationAdded =
    proc(availability: Availability) {.async.} =
      await sales.onReservationAdded(availability)


proc subscribe(sales: Sales) {.async.} =
  await sales.subscribeRequested()
  await sales.subscribeFulfilled()
  await sales.subscribeFailure()
  await sales.subscribeSlotFilled()
  await sales.subscribeSlotFreed()
  await sales.subscribeCancellation()

proc unsubscribe(sales: Sales) {.async.} =
  for sub in sales.subscriptions:
    try:
      await sub.unsubscribe()
    except CatchableError as e:
      error "Unable to unsubscribe from subscription", error = e.msg

proc start*(sales: Sales) {.async.} =
  await sales.startSlotQueue()
  await sales.subscribe()
  await sales.load()

proc stop*(sales: Sales) {.async.} =
  if subscription =? sales.subscription:
    sales.subscription = market.Subscription.none
    try:
      await subscription.unsubscribe()
    except CatchableError as e:
      warn "Unsubscribe failed", msg = e.msg
  trace "stopping sales"
  sales.running = false
  await sales.context.slotQueue.stop()
  await sales.unsubscribe()
  await sales.trackedFutures.cancelTracked()

  for agent in sales.agents:
    await agent.stop()

  sales.agents = @[]

@ -42,7 +42,9 @@ type
    used*: bool
  Reservations* = ref object
    repo: RepoStore
    onReservationAdded: ?OnReservationAdded
  GetNext* = proc(): Future[?Availability] {.upraises: [], gcsafe, closure.}
  OnReservationAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
  AvailabilityIter* = ref object
    finished*: bool
    next*: GetNext
@ -96,18 +98,22 @@ proc toErr[E1: ref CatchableError, E2: AvailabilityError](

proc writeValue*(
  writer: var JsonWriter,
  value: SlotId | AvailabilityId) {.upraises:[IOError].} =
  value: AvailabilityId) {.upraises:[IOError].} =

  mixin writeValue
  writer.writeValue value.toArray

proc readValue*[T: SlotId | AvailabilityId](
proc readValue*[T: AvailabilityId](
  reader: var JsonReader,
  value: var T) {.upraises: [SerializationError, IOError].} =

  mixin readValue
  value = T reader.readValue(T.distinctBase)
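
`writeValue` serializes the distinct id through its underlying byte array, and `readValue` parses the base type and converts back through `distinctBase`. A std-only sketch of that round trip, with a hypothetical `Id` type standing in for `AvailabilityId`:

type Id = distinct array[4, byte]

proc toArray(id: Id): array[4, byte] =
  array[4, byte](id)  # distinct -> base type, what writeValue emits

when isMainModule:
  let id = Id([1'u8, 2, 3, 4])
  let wire = id.toArray  # serialized form
  let back = Id(wire)    # readValue's reconstruction: base type -> distinct
  assert back.toArray == wire
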
proc `onReservationAdded=`*(self: Reservations,
                            onReservationAdded: OnReservationAdded) =
  self.onReservationAdded = some onReservationAdded

func key(id: AvailabilityId): ?!Key =
  (ReservationsKey / id.toArray.toHex)

@ -210,6 +216,15 @@ proc reserve*(

    return failure(updateErr)

  if onReservationAdded =? self.onReservationAdded:
    try:
      await onReservationAdded(availability)
    except CatchableError as e:
      # we don't have any insight into types of errors that `onReservationAdded` can
      # throw because it is caller-defined
      warn "Unknown error during 'onReservationAdded' callback",
        availabilityId = availability.id, error = e.msg

  return success()

proc release*(
@ -320,7 +335,7 @@ proc unused*(r: Reservations): Future[?!seq[Availability]] {.async.} =

proc find*(
  self: Reservations,
  size, duration, minPrice: UInt256, collateral: UInt256,
  size, duration, minPrice, collateral: UInt256,
  used: bool): Future[?Availability] {.async.} =

@ -1,8 +1,11 @@
import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/upraises
import ../contracts/requests
import ../utils/asyncspawn
import ../errors
import ./statemachine
import ./salescontext
import ./salesdata
@ -13,10 +16,13 @@ export reservations
logScope:
  topics = "marketplace sales"

type SalesAgent* = ref object of Machine
  context*: SalesContext
  data*: SalesData
  subscribed: bool
type
  SalesAgent* = ref object of Machine
    context*: SalesContext
    data*: SalesData
    subscribed: bool
  SalesAgentError = object of CodexError
  AllSlotsFilledError* = object of SalesAgentError

func `==`*(a, b: SalesAgent): bool =
  a.data.requestId == b.data.requestId and
@ -26,12 +32,13 @@ proc newSalesAgent*(context: SalesContext,
                    requestId: RequestId,
                    slotIndex: UInt256,
                    request: ?StorageRequest): SalesAgent =
  SalesAgent(
    context: context,
    data: SalesData(
      requestId: requestId,
      slotIndex: slotIndex,
      request: request))
  var agent = SalesAgent.new()
  agent.context = context
  agent.data = SalesData(
    requestId: requestId,
    slotIndex: slotIndex,
    request: request)
  return agent

proc retrieveRequest*(agent: SalesAgent) {.async.} =
  let data = agent.data
@ -41,7 +48,6 @@ proc retrieveRequest*(agent: SalesAgent) {.async.} =

proc subscribeCancellation(agent: SalesAgent) {.async.} =
  let data = agent.data
  let market = agent.context.market
  let clock = agent.context.clock

  proc onCancelled() {.async.} =
@ -49,51 +55,34 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
      return

    await clock.waitUntil(request.expiry.truncate(int64))
    if not data.fulfilled.isNil:
      asyncSpawn data.fulfilled.unsubscribe(), ignore = CatchableError
    agent.schedule(cancelledEvent(request))

  data.cancelled = onCancelled()

  proc onFulfilled(_: RequestId) =
    data.cancelled.cancel()
method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
  if agent.data.requestId == requestId and
     not agent.data.cancelled.isNil:
    agent.data.cancelled.cancel()

  data.fulfilled =
    await market.subscribeFulfillment(data.requestId, onFulfilled)

proc subscribeFailure(agent: SalesAgent) {.async.} =
  let data = agent.data
  let market = agent.context.market

  proc onFailed(_: RequestId) =
    without request =? data.request:
      return
    asyncSpawn data.failed.unsubscribe(), ignore = CatchableError
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
  without request =? agent.data.request:
    return
  if agent.data.requestId == requestId:
    agent.schedule(failedEvent(request))

  data.failed =
    await market.subscribeRequestFailed(data.requestId, onFailed)
method onSlotFilled*(agent: SalesAgent,
                     requestId: RequestId,
                     slotIndex: UInt256) {.base, gcsafe, upraises: [].} =

proc subscribeSlotFilled(agent: SalesAgent) {.async.} =
  let data = agent.data
  let market = agent.context.market

  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
    asyncSpawn data.slotFilled.unsubscribe(), ignore = CatchableError
    agent.schedule(slotFilledEvent(requestId, data.slotIndex))

  data.slotFilled =
    await market.subscribeSlotFilled(data.requestId,
                                     data.slotIndex,
                                     onSlotFilled)
  if agent.data.requestId == requestId and
     agent.data.slotIndex == slotIndex:
    agent.schedule(slotFilledEvent(requestId, slotIndex))

proc subscribe*(agent: SalesAgent) {.async.} =
  if agent.subscribed:
    return

  await agent.subscribeCancellation()
  await agent.subscribeFailure()
  await agent.subscribeSlotFilled()
  agent.subscribed = true

proc unsubscribe*(agent: SalesAgent) {.async.} =
@ -101,30 +90,12 @@ proc unsubscribe*(agent: SalesAgent) {.async.} =
    return

  let data = agent.data
  try:
    if not data.fulfilled.isNil:
      await data.fulfilled.unsubscribe()
      data.fulfilled = nil
  except CatchableError:
    discard
  try:
    if not data.failed.isNil:
      await data.failed.unsubscribe()
      data.failed = nil
  except CatchableError:
    discard
  try:
    if not data.slotFilled.isNil:
      await data.slotFilled.unsubscribe()
      data.slotFilled = nil
  except CatchableError:
    discard
  if not data.cancelled.isNil:
  if not data.cancelled.isNil and not data.cancelled.finished:
    await data.cancelled.cancelAndWait()
    data.cancelled = nil

  agent.subscribed = false

proc stop*(agent: SalesAgent) {.async.} =
  procCall Machine(agent).stop()
  await Machine(agent).stop()
  await agent.unsubscribe()

@ -5,6 +5,7 @@ import ../node/batch
import ../market
import ../clock
import ../proving
import ./slotqueue
import ./reservations

type
@ -14,9 +15,10 @@ type
    onStore*: ?OnStore
    onClear*: ?OnClear
    onSale*: ?OnSale
    onIgnored*: OnIgnored
    onCleanUp*: OnCleanUp
    proving*: Proving
    reservations*: Reservations
    slotQueue*: SlotQueue

  OnStore* = proc(request: StorageRequest,
                  slot: UInt256,
@ -27,4 +29,4 @@ type
                  slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnSale* = proc(request: StorageRequest,
                 slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnIgnored* = proc() {.gcsafe, upraises: [].}
  OnCleanUp* = proc: Future[void] {.gcsafe, upraises: [].}

@ -9,7 +9,4 @@
    ask*: StorageAsk
    request*: ?StorageRequest
    slotIndex*: UInt256
    failed*: market.Subscription
    fulfilled*: market.Subscription
    slotFilled*: market.Subscription
    cancelled*: Future[void]

395
codex/sales/slotqueue.nim
Normal file
@ -0,0 +1,395 @@
import std/sequtils
import std/tables
import pkg/chronicles
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./reservations
import ../errors
import ../rng
import ../utils
import ../contracts/requests
import ../utils/asyncheapqueue
import ../utils/then
import ../utils/trackedfutures

logScope:
  topics = "marketplace slotqueue"

type
  OnProcessSlot* =
    proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises:[].}

  # Non-ref obj copies value when assigned, preventing accidental modification
  # of values which could cause an incorrect order (eg
  # ``slotQueue[1].collateral = 1`` would cause ``collateral`` to be updated,
  # but the heap invariant would no longer be honoured. When non-ref, the
  # compiler can ensure that statement will fail).
  SlotQueueWorker = object
    doneProcessing*: Future[void]

  SlotQueueItem* = object
    requestId: RequestId
    slotIndex: uint16
    slotSize: UInt256
    duration: UInt256
    reward: UInt256
    collateral: UInt256
    expiry: UInt256

  # don't need to -1 to prevent overflow when adding 1 (to always allow push)
  # because AsyncHeapQueue size is of type `int`, which is larger than `uint16`
  SlotQueueSize = range[1'u16..uint16.high]

  SlotQueue* = ref object
    maxWorkers: int
    onProcessSlot: ?OnProcessSlot
    queue: AsyncHeapQueue[SlotQueueItem]
    reservations: Reservations
    running: bool
    workers: AsyncQueue[SlotQueueWorker]
    trackedFutures: TrackedFutures

  SlotQueueError = object of CodexError
  SlotQueueItemExistsError* = object of SlotQueueError
  SlotQueueItemNotExistsError* = object of SlotQueueError
  SlotsOutOfRangeError* = object of SlotQueueError
  NoMatchingAvailabilityError* = object of SlotQueueError
  QueueNotRunningError* = object of SlotQueueError

# Number of concurrent workers used for processing SlotQueueItems
const DefaultMaxWorkers = 3

# Cap slot queue size to prevent unbounded growth and make sifting more
# efficient. Max size is not equivalent to the number of slots a host can
# service, which is limited by host availabilities and new requests circulating
# the network. Additionally, each new request/slot in the network will be
# included in the queue if it is higher priority than any of the existing
# items. Older slots should be unfillable over time as other hosts fill the
# slots.
const DefaultMaxSize = 64'u16
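
The comment on these types leans on Nim's value semantics: a non-ref object is copied on assignment, so a value read out of the heap can never alias, and mutate, what the heap stores. A tiny stand-alone demonstration of that copy-on-assignment behaviour, with a hypothetical `Item` type:

type Item = object
  collateral: int

when isMainModule:
  var stored = @[Item(collateral: 1)]  # stand-in for the heap's storage
  var copy = stored[0]                 # non-ref object: full copy on assignment
  copy.collateral = 99                 # mutates only the copy...
  assert stored[0].collateral == 1     # ...the stored value is untouched
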
proc profitability(item: SlotQueueItem): UInt256 =
  StorageAsk(collateral: item.collateral,
             duration: item.duration,
             reward: item.reward,
             slotSize: item.slotSize).pricePerSlot

proc `<`*(a, b: SlotQueueItem): bool =
  # for A to have a higher priority than B (in a min queue), A must be less than
  # B.
  var scoreA: uint8 = 0
  var scoreB: uint8 = 0

  proc addIf(score: var uint8, condition: bool, addition: int) =
    if condition:
      score += 1'u8 shl addition

  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)

  scoreA.addIf(a.collateral < b.collateral, 2)
  scoreB.addIf(a.collateral > b.collateral, 2)

  scoreA.addIf(a.expiry > b.expiry, 1)
  scoreB.addIf(a.expiry < b.expiry, 1)

  scoreA.addIf(a.slotSize < b.slotSize, 0)
  scoreB.addIf(a.slotSize > b.slotSize, 0)

  return scoreA > scoreB
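
Each criterion above contributes a single bit, so a higher-weight criterion (profitability, bit 3) always outranks any combination of lower ones, and a tie on a criterion leaves both scores' bits unset. A self-contained sketch of the same comparator over plain ints:

type Item = object
  profitability, collateral, expiry, slotSize: int

proc `<`(a, b: Item): bool =
  var scoreA, scoreB: uint8

  proc addIf(score: var uint8, condition: bool, bit: int) =
    if condition:
      score += 1'u8 shl bit

  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)
  scoreA.addIf(a.collateral < b.collateral, 2)
  scoreB.addIf(a.collateral > b.collateral, 2)
  scoreA.addIf(a.expiry > b.expiry, 1)
  scoreB.addIf(a.expiry < b.expiry, 1)
  scoreA.addIf(a.slotSize < b.slotSize, 0)
  scoreB.addIf(a.slotSize > b.slotSize, 0)
  scoreA > scoreB

when isMainModule:
  let cheap = Item(profitability: 10, collateral: 1, expiry: 5, slotSize: 1)
  let rich = Item(profitability: 20, collateral: 9, expiry: 5, slotSize: 1)
  # rich scores bit 3 (8) for profitability; cheap scores bit 2 (4) for its
  # lower collateral; 8 > 4, so rich sorts first in the min-queue
  assert rich < cheap
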
proc `==`*(a, b: SlotQueueItem): bool =
  a.requestId == b.requestId and
  a.slotIndex == b.slotIndex

proc new*(_: type SlotQueue,
          reservations: Reservations,
          maxWorkers = DefaultMaxWorkers,
          maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue =

  if maxWorkers <= 0:
    raise newException(ValueError, "maxWorkers must be positive")
  if maxWorkers.uint16 > maxSize:
    raise newException(ValueError, "maxWorkers must be less than maxSize")

  SlotQueue(
    maxWorkers: maxWorkers,
    # Add 1 to always allow for an extra item to be pushed onto the queue
    # temporarily. After push (and sort), the bottom-most item will be deleted
    queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
    reservations: reservations,
    running: false,
    trackedFutures: TrackedFutures.new()
  )
  # avoid instantiating `workers` in constructor to avoid side effects in
  # `newAsyncQueue` procedure

proc init*(_: type SlotQueueWorker): SlotQueueWorker =
  SlotQueueWorker(
    doneProcessing: newFuture[void]("slotqueue.worker.processing")
  )

proc init*(_: type SlotQueueItem,
           requestId: RequestId,
           slotIndex: uint16,
           ask: StorageAsk,
           expiry: UInt256): SlotQueueItem =

  SlotQueueItem(
    requestId: requestId,
    slotIndex: slotIndex,
    slotSize: ask.slotSize,
    duration: ask.duration,
    reward: ask.reward,
    collateral: ask.collateral,
    expiry: expiry
  )

proc init*(_: type SlotQueueItem,
           request: StorageRequest,
           slotIndex: uint16): SlotQueueItem =

  SlotQueueItem.init(request.id,
                     slotIndex,
                     request.ask,
                     request.expiry)

proc init*(_: type SlotQueueItem,
           requestId: RequestId,
           ask: StorageAsk,
           expiry: UInt256): seq[SlotQueueItem] =

  if not ask.slots.inRange:
    raise newException(SlotsOutOfRangeError, "Too many slots")

  var i = 0'u16
  proc initSlotQueueItem: SlotQueueItem =
    let item = SlotQueueItem.init(requestId, i, ask, expiry)
    inc i
    return item

  var items = newSeqWith(ask.slots.int, initSlotQueueItem())
  Rng.instance.shuffle(items)
  return items

proc init*(_: type SlotQueueItem,
           request: StorageRequest): seq[SlotQueueItem] =

  return SlotQueueItem.init(request.id, request.ask, request.expiry)

proc inRange*(val: SomeUnsignedInt): bool =
  val.uint16 in SlotQueueSize.low..SlotQueueSize.high

proc requestId*(self: SlotQueueItem): RequestId = self.requestId
proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex
proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize
proc duration*(self: SlotQueueItem): UInt256 = self.duration
proc reward*(self: SlotQueueItem): UInt256 = self.reward
proc collateral*(self: SlotQueueItem): UInt256 = self.collateral

proc running*(self: SlotQueue): bool = self.running

proc len*(self: SlotQueue): int = self.queue.len

proc size*(self: SlotQueue): int = self.queue.size - 1

proc `$`*(self: SlotQueue): string = $self.queue

proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
  self.onProcessSlot = some onProcessSlot

proc activeWorkers*(self: SlotQueue): int =
  if not self.running: return 0

  # active = capacity - available
  self.maxWorkers - self.workers.len

proc contains*(self: SlotQueue, item: SlotQueueItem): bool =
  self.queue.contains(item)

proc populateItem*(self: SlotQueue,
                   requestId: RequestId,
                   slotIndex: uint16): ?SlotQueueItem =

  trace "populate item, items in queue", len = self.queue.len
  for item in self.queue.items:
    trace "populate item search", itemRequestId = item.requestId, requestId
    if item.requestId == requestId:
      return some SlotQueueItem(
        requestId: requestId,
        slotIndex: slotIndex,
        slotSize: item.slotSize,
        duration: item.duration,
        reward: item.reward,
        collateral: item.collateral,
        expiry: item.expiry
      )
  return none SlotQueueItem

proc push*(self: SlotQueue, item: SlotQueueItem): Future[?!void] {.async.} =

  trace "pushing item to queue",
    requestId = item.requestId, slotIndex = item.slotIndex

  if not self.running:
    let err = newException(QueueNotRunningError, "queue not running")
    return failure(err)

  without availability =? await self.reservations.find(item.slotSize,
                                                       item.duration,
                                                       item.profitability,
                                                       item.collateral,
                                                       used = false):
    let err = newException(NoMatchingAvailabilityError, "no availability")
    return failure(err)

  if self.contains(item):
    let err = newException(SlotQueueItemExistsError, "item already exists")
    return failure(err)

  if err =? self.queue.pushNoWait(item).mapFailure.errorOption:
    return failure(err)

  if self.queue.full():
    # delete the last item
    self.queue.del(self.queue.size - 1)

  doAssert self.queue.len <= self.queue.size - 1
  return success()
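
Because the heap is allocated with capacity `maxSize + 1`, `push` can always admit a candidate first and evict the lowest-priority (bottom) entry afterwards, keeping at most `maxSize` items. A rough std-only sketch of that bounded-insert policy, with a plain sorted seq standing in for `AsyncHeapQueue` (hypothetical `Item` and `boundedPush` names):

import std/algorithm

type Item = object
  priority: int  # lower value = higher priority, as in a min-heap

proc boundedPush(queue: var seq[Item], item: Item, maxSize: int) =
  queue.add item                                      # always allow the push...
  queue.sort(proc (a, b: Item): int = cmp(a.priority, b.priority))
  if queue.len > maxSize:                             # ...then trim the bottom
    queue.setLen(maxSize)

when isMainModule:
  var q: seq[Item]
  for p in [5, 1, 4, 2, 3]:
    q.boundedPush(Item(priority: p), maxSize = 3)
  # only the three highest-priority (lowest-valued) items survive
  assert q == @[Item(priority: 1), Item(priority: 2), Item(priority: 3)]
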
proc push*(self: SlotQueue, items: seq[SlotQueueItem]): Future[?!void] {.async.} =
  for item in items:
    if err =? (await self.push(item)).errorOption:
      return failure(err)

  return success()

proc findByRequest(self: SlotQueue, requestId: RequestId): seq[SlotQueueItem] =
  var items: seq[SlotQueueItem] = @[]
  for item in self.queue.items:
    if item.requestId == requestId:
      items.add item
  return items

proc delete*(self: SlotQueue, item: SlotQueueItem) =
  logScope:
    requestId = item.requestId
    slotIndex = item.slotIndex

  trace "removing item from queue"

  if not self.running:
    trace "cannot delete item from queue, queue not running"
    return

  self.queue.delete(item)

proc delete*(self: SlotQueue, requestId: RequestId, slotIndex: uint16) =
  let item = SlotQueueItem(requestId: requestId, slotIndex: slotIndex)
  self.delete(item)

proc delete*(self: SlotQueue, requestId: RequestId) =
  let items = self.findByRequest(requestId)
  for item in items:
    self.delete(item)

proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem =
  self.queue[i]

proc addWorker(self: SlotQueue): ?!void =
  if not self.running:
    let err = newException(QueueNotRunningError, "queue must be running")
    return failure(err)

  trace "adding new worker to worker queue"

  let worker = SlotQueueWorker.init()
  try:
    self.workers.addLastNoWait(worker)
  except AsyncQueueFullError:
    return failure("failed to add worker, worker queue full")

  return success()

proc dispatch(self: SlotQueue,
              worker: SlotQueueWorker,
              item: SlotQueueItem) {.async.} =
  logScope:
    requestId = item.requestId
    slotIndex = item.slotIndex

  if not self.running:
    warn "Could not dispatch worker because queue is not running"
    return

  if onProcessSlot =? self.onProcessSlot:
    try:
      await onProcessSlot(item, worker.doneProcessing)
      await worker.doneProcessing

      if err =? self.addWorker().errorOption:
        raise err # catch below

    except QueueNotRunningError as e:
      info "could not re-add worker to worker queue, queue not running",
        error = e.msg
    except CancelledError:
      # do not bubble exception up as it is called with `asyncSpawn` which would
      # convert the exception into a `FutureDefect`
      discard
    except CatchableError as e:
      # we don't have any insight into types of errors that `onProcessSlot` can
      # throw because it is caller-defined
      warn "Unknown error processing slot in worker", error = e.msg
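
`dispatch` runs the caller-supplied `onProcessSlot`, waits on the worker's `doneProcessing` future, and only then re-adds a worker, which is what caps concurrent processing at `maxWorkers`. A pared-down sketch of that completion handshake using `std/asyncdispatch` futures instead of chronos (hypothetical `process`/`dispatchOne` names; in the real queue the processor may signal `done` long after returning):

import std/asyncdispatch

proc process(id: int, done: Future[void]) {.async.} =
  # stand-in for onProcessSlot: do the work, then signal completion
  await sleepAsync(10)
  echo "slot ", id, " processed"
  done.complete()  # lets the dispatcher recycle the worker

proc dispatchOne(id: int) {.async.} =
  let done = newFuture[void]("worker.done")
  await process(id, done)  # hand the done future to the processor
  await done               # wait for the completion signal
  echo "worker can be re-added for slot ", id

when isMainModule:
  waitFor dispatchOne(1)
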
proc start*(self: SlotQueue) {.async.} =
  if self.running:
    return

  trace "starting slot queue"

  self.running = true

  # must be called in `start` to avoid sideeffects in `new`
  self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers)

  # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
  # task, a new worker will be pushed to the queue
  for i in 0..<self.maxWorkers:
    if err =? self.addWorker().errorOption:
      error "start: error adding new worker", error = err.msg

  while self.running:
    try:
      let worker = await self.workers.popFirst().track(self) # if workers saturated, wait here for new workers
      let item = await self.queue.pop().track(self) # if queue empty, wait here for new items

      if not self.running: # may have changed after waiting for pop
        trace "not running, exiting"
        break

      self.dispatch(worker, item)
        .track(self)
        .catch(proc (e: ref CatchableError) =
          error "Unknown error dispatching worker", error = e.msg
        )

      discard worker.doneProcessing.track(self)

      await sleepAsync(1.millis) # poll
    except CancelledError:
      discard
    except CatchableError as e: # raised from self.queue.pop() or self.workers.pop()
      warn "slot queue error encountered during processing", error = e.msg

proc stop*(self: SlotQueue) {.async.} =
  if not self.running:
    return

  trace "stopping slot queue"

  self.running = false

  await self.trackedFutures.cancelTracked()
@ -9,12 +9,12 @@ import ./errorhandling
import ./cancelled
import ./failed
import ./filled
import ./ignored
import ./proving
import ./errored

type
  SaleDownloading* = ref object of ErrorHandlingState
    availability*: Availability

logScope:
  topics = "marketplace sales downloading"
@ -36,9 +36,7 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.}
  let data = agent.data
  let context = agent.context
  let reservations = context.reservations

  await agent.retrieveRequest()
  await agent.subscribe()
  let availability = state.availability

  without onStore =? context.onStore:
    raiseAssert "onStore callback not set"
@ -46,21 +44,8 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.}
  without request =? data.request:
    raiseAssert "no sale request"

  debug "New request detected, downloading info", requestId = $data.requestId

  without availability =? await reservations.find(
    request.ask.slotSize,
    request.ask.duration,
    request.ask.pricePerSlot,
    request.ask.collateral,
    used = false):
    info "No availability found for request, ignoring",
      requestId = $data.requestId,
      slotSize = request.ask.slotSize,
      duration = request.ask.duration,
      pricePerSlot = request.ask.pricePerSlot,
      used = false
    return some State(SaleIgnored())
  without slotIndex =? data.slotIndex:
    raiseAssert("no slot index assigned")

  # mark availability as used so that it is not matched to other requests
  if markUsedErr =? (await reservations.markUsed(availability.id)).errorOption:
@ -86,7 +71,7 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.}

  trace "Starting download"
  if err =? (await onStore(request,
                           data.slotIndex,
                           slotIndex,
                           onBatch)).errorOption:

    markUnused(availability.id)

@ -21,11 +21,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} =
  let data = agent.data
  let context = agent.context

  error "Sale error", error=state.error.msg, requestId = $data.requestId

  if onClear =? context.onClear and
     request =? data.request and
     slotIndex =? data.slotIndex:
    onClear(request, slotIndex)

  await agent.unsubscribe()
  if onCleanUp =? context.onCleanUp:
    await onCleanUp()

  error "Sale error", error=state.error.msg, requestId = $data.requestId

@ -23,7 +23,10 @@ method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
  let data = SalesAgent(machine).data
  let market = SalesAgent(machine).context.market

  let host = await market.getHost(data.requestId, data.slotIndex)
  without slotIndex =? data.slotIndex:
    raiseAssert("no slot index assigned")

  let host = await market.getHost(data.requestId, slotIndex)
  let me = await market.getSigner()
  if host == me.some:
    return some State(SaleFinished())

@ -32,5 +32,8 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
  without (collateral =? data.request.?ask.?collateral):
    raiseAssert "Request not set"

  debug "Filling slot", requestId = $data.requestId, slot = $data.slotIndex
  await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
  without slotIndex =? data.slotIndex:
    raiseAssert("no slot index assigned")

  debug "Filling slot", requestId = $data.requestId, slotIndex
  await market.fillSlot(data.requestId, slotIndex, state.proof, collateral)

@ -36,4 +36,5 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} =
  if onSale =? context.onSale:
    onSale(request, slotIndex)

  await agent.unsubscribe()
  if onCleanUp =? context.onCleanUp:
    await onCleanUp()

@ -12,7 +12,5 @@ method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} =
  let agent = SalesAgent(machine)
  let context = agent.context

  if onIgnored =? context.onIgnored:
    onIgnored()

  await agent.unsubscribe()
  if onCleanUp =? context.onCleanUp:
    await onCleanUp()

69
codex/sales/states/preparing.nim
Normal file
@ -0,0 +1,69 @@
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import ../../market
import ../salesagent
import ../statemachine
import ./errorhandling
import ./cancelled
import ./failed
import ./filled
import ./ignored
import ./downloading

type
  SalePreparing* = ref object of ErrorHandlingState

logScope:
  topics = "sales preparing"

method `$`*(state: SalePreparing): string = "SalePreparing"

method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
  return some State(SaleFailed())

method onSlotFilled*(state: SalePreparing, requestId: RequestId,
                     slotIndex: UInt256): ?State =
  return some State(SaleFilled())

method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let context = agent.context
  let market = context.market
  let reservations = context.reservations

  await agent.retrieveRequest()
  await agent.subscribe()

  without request =? data.request:
    raiseAssert "no sale request"

  let slotId = slotId(data.requestId, data.slotIndex)
  let state = await market.slotState(slotId)
  if state != SlotState.Free:
    return some State(SaleIgnored())

  # TODO: Once implemented, check to ensure the host is allowed to fill the slot,
  # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)

  # availability was checked for this slot when it entered the queue, however
  # check to ensure that there is still availability, as it may have
  # changed since being added (other slots may have been processed in that time)
  without availability =? await reservations.find(
      request.ask.slotSize,
      request.ask.duration,
      request.ask.pricePerSlot,
      request.ask.collateral,
      used = false):
    info "no availability found for request, ignoring",
      slotSize = request.ask.slotSize,
      duration = request.ask.duration,
      pricePerSlot = request.ask.pricePerSlot,
      used = false
    return some State(SaleIgnored())

  return some State(SaleDownloading(availability: availability))
@ -35,8 +35,11 @@ method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} =
  without onProve =? context.proving.onProve:
    raiseAssert "onProve callback not set"

  without slotIndex =? data.slotIndex:
    raiseAssert("no slot index assigned")

  debug "Start proof generation", requestId = $data.requestId
  let proof = await onProve(Slot(request: request, slotIndex: data.slotIndex))
  let proof = await onProve(Slot(request: request, slotIndex: slotIndex))
  debug "Finished proof generation", requestId = $data.requestId

  return some State(SaleFilling(proof: proof))

@ -27,7 +27,10 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
  await agent.retrieveRequest()
  await agent.subscribe()

  let slotId = slotId(data.requestId, data.slotIndex)
  without slotIndex =? data.slotIndex:
    raiseAssert("no slot index assigned")

  let slotId = slotId(data.requestId, slotIndex)

  without slotState =? await market.slotState(slotId):
    let error = newException(SaleUnknownError, "cannot retrieve slot state")

@ -47,7 +47,7 @@ proc retrieve*(
    trace "Cannot retrieve storage proof data from fs", path, error
    return failure("Cannot retrieve storage proof data from fs")

  return PorMessage.decode(data).mapFailure
  return PorMessage.decode(data).mapFailure(CatchableError)

proc store*(
  self: StpStore,

@ -18,7 +18,7 @@ import pkg/questionable/results

import ../blocktype

export blocktype, libp2p
export blocktype

type
  BlockNotFoundError* = object of CodexError

@ -20,6 +20,7 @@ const
  CodexMetaKey* = Key.init(CodexMetaNamespace).tryGet
  CodexRepoKey* = Key.init(CodexRepoNamespace).tryGet
  CodexBlocksKey* = Key.init(CodexBlocksNamespace).tryGet
  CodexTotalBlocksKey* = Key.init(CodexBlockTotalNamespace).tryGet
  CodexManifestKey* = Key.init(CodexManifestNamespace).tryGet
  BlocksTtlKey* = Key.init(CodexBlocksTtlNamespace).tryGet
  QuotaKey* = Key.init(CodexQuotaNamespace).tryGet

@ -13,7 +13,8 @@ push: {.upraises: [].}

import pkg/chronos
import pkg/chronicles
import pkg/libp2p
import pkg/libp2p/cid
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import pkg/datastore
@ -25,11 +26,15 @@ import ../blocktype
import ../clock
import ../systemclock

export blocktype, libp2p
export blocktype, cid

logScope:
  topics = "codex repostore"

declareGauge(codexRepostoreBlocks, "codex repostore blocks")
declareGauge(codexRepostoreBytesUsed, "codex repostore bytes used")
declareGauge(codexRepostoreBytesReserved, "codex repostore bytes reserved")

const
  DefaultBlockTtl* = 24.hours
  DefaultQuotaBytes* = 1'u shl 33'u # ~8GB
@ -43,6 +48,7 @@ type
    repoDs*: Datastore
    metaDs*: Datastore
    clock: Clock
    totalBlocks*: uint        # number of blocks in the store
    quotaMaxBytes*: uint      # maximum available bytes
    quotaUsedBytes*: uint     # bytes used by the repo
    quotaReservedBytes*: uint # bytes reserved by the repo
@ -61,6 +67,11 @@ iterator items*(q: BlockExpirationIter): Future[?BlockExpiration] =
  while not q.finished:
    yield q.next()

proc updateMetrics(self: RepoStore) =
  codexRepostoreBlocks.set(self.totalBlocks.int64)
  codexRepostoreBytesUsed.set(self.quotaUsedBytes.int64)
  codexRepostoreBytesReserved.set(self.quotaReservedBytes.int64)

func totalUsed*(self: RepoStore): uint =
  (self.quotaUsedBytes + self.quotaReservedBytes)

@ -105,6 +116,14 @@ proc getBlockExpirationEntry(
  let value = self.getBlockExpirationTimestamp(ttl).toBytes
  return success((key, value))

proc persistTotalBlocksCount(self: RepoStore): Future[?!void] {.async.} =
  if err =? (await self.metaDs.put(
      CodexTotalBlocksKey,
      @(self.totalBlocks.uint64.toBytesBE))).errorOption:
    trace "Error updating total blocks key!", err = err.msg
    return failure(err)
  return success()

method putBlock*(
  self: RepoStore,
  blk: Block,
@ -156,6 +175,12 @@ method putBlock*(
    return failure(err)

  self.quotaUsedBytes = used
  inc self.totalBlocks
  if isErr (await self.persistTotalBlocksCount()):
    trace "Unable to update block total metadata"
    return failure("Unable to update block total metadata")

  self.updateMetrics()
  return success()

proc updateQuotaBytesUsed(self: RepoStore, blk: Block): Future[?!void] {.async.} =
@ -166,6 +191,7 @@ proc updateQuotaBytesUsed(self: RepoStore, blk: Block): Future[?!void] {.async.}
    trace "Error updating quota key!", err = err.msg
    return failure(err)
  self.quotaUsedBytes = used
  self.updateMetrics()
  return success()

proc removeBlockExpirationEntry(self: RepoStore, cid: Cid): Future[?!void] {.async.} =
@ -195,6 +221,12 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =

  trace "Deleted block", cid, totalUsed = self.totalUsed

  dec self.totalBlocks
  if isErr (await self.persistTotalBlocksCount()):
    trace "Unable to update block total metadata"
    return failure("Unable to update block total metadata")

  self.updateMetrics()
  return success()

method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
@ -251,7 +283,7 @@ method getBlockExpirations*(
  offset: int
): Future[?!BlockExpirationIter] {.async, base.} =
  ## Get block expirations from the given RepoStore
  ##
  ##
  without query =? createBlockExpirationQuery(maxNumber, offset), err:
    trace "Unable to format block expirations query"
    return failure(err)
@ -346,6 +378,7 @@ proc release*(self: RepoStore, bytes: uint): Future[?!void] {.async.} =
    return failure(err)

  trace "Released bytes", bytes
  self.updateMetrics()
  return success()

proc start*(self: RepoStore): Future[void] {.async.} =
@ -358,6 +391,14 @@ proc start*(self: RepoStore): Future[void] {.async.} =

  trace "Starting repo"

  without total =? await self.metaDs.get(CodexTotalBlocksKey), err:
    if not (err of DatastoreKeyNotFound):
      error "Unable to read total number of blocks from metadata store", err = err.msg, key = $CodexTotalBlocksKey

  if total.len > 0:
    self.totalBlocks = uint64.fromBytesBE(total).uint
  trace "Number of blocks in store at start", total = self.totalBlocks

  ## load current persist and cache bytes from meta ds
  without quotaUsedBytes =? await self.metaDs.get(QuotaUsedKey), err:
    if not (err of DatastoreKeyNotFound):
@ -386,6 +427,7 @@ proc start*(self: RepoStore): Future[void] {.async.} =

  notice "Current bytes used for persist quota", bytes = self.quotaReservedBytes

  self.updateMetrics()
  self.started = true

proc stop*(self: RepoStore): Future[void] {.async.} =
@ -410,8 +452,8 @@ func new*(
  quotaMaxBytes = DefaultQuotaBytes,
  blockTtl = DefaultBlockTtl
): RepoStore =
  ## Create new instance of a RepoStore
  ##
  ## Create new instance of a RepoStore
  ##
  RepoStore(
    repoDs: repoDs,
    metaDs: metaDs,

@ -7,11 +7,11 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/libp2p
import pkg/libp2p/stream/lpstream
import pkg/chronos
import pkg/chronicles

export libp2p, chronos, chronicles
export lpstream, chronos, chronicles

logScope:
  topics = "codex seekablestream"

@ -13,7 +13,6 @@ import pkg/upraises

push: {.upraises: [].}

import pkg/libp2p
import pkg/chronos
import pkg/chronicles
import pkg/stew/ptrops

@ -283,7 +283,7 @@ proc len*[T](heap: AsyncHeapQueue[T]): int {.inline.} =

proc size*[T](heap: AsyncHeapQueue[T]): int {.inline.} =
  ## Return the maximum number of elements in ``heap``.
  len(heap.maxsize)
  heap.maxsize

proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural): T {.inline.} =
  ## Access the i-th element of ``heap`` by order from first to last.

@ -1,7 +1,10 @@
import std/sugar
import pkg/questionable
import pkg/chronos
import pkg/chronicles
import pkg/upraises
import ./trackedfutures
import ./then

push: {.upraises:[].}

@ -10,8 +13,8 @@ type
    state: State
    running: Future[void]
    scheduled: AsyncQueue[Event]
    scheduling: Future[void]
    started: bool
    trackedFutures: TrackedFutures
  State* = ref object of RootObj
  Query*[T] = proc(state: State): T
  Event* = proc(state: State): ?State {.gcsafe, upraises:[].}
@ -19,6 +22,9 @@ type
logScope:
  topics = "statemachine"

proc new*[T: Machine](_: type T): T =
  T(trackedFutures: TrackedFutures.new())

method `$`*(state: State): string {.base.} =
  raiseAssert "not implemented"

@ -60,21 +66,21 @@ proc run(machine: Machine, state: State) {.async.} =
    discard

proc scheduler(machine: Machine) {.async.} =
  proc onRunComplete(udata: pointer) {.gcsafe.} =
    var fut = cast[FutureBase](udata)
    if fut.failed():
      machine.schedule(machine.onError(fut.error))

  var running: Future[void]
  try:
    while true:
      let event = await machine.scheduled.get()
    while machine.started:
      let event = await machine.scheduled.get().track(machine)
      if next =? event(machine.state):
        if not machine.running.isNil:
          await machine.running.cancelAndWait()
        if not running.isNil and not running.finished:
          await running.cancelAndWait()
        machine.state = next
        debug "enter state", state = machine.state
        machine.running = machine.run(machine.state)
        machine.running.addCallback(onRunComplete)
        running = machine.run(machine.state)
        running
          .track(machine)
          .catch((err: ref CatchableError) =>
            machine.schedule(machine.onError(err))
          )
  except CancelledError:
    discard

@ -84,18 +90,20 @@ proc start*(machine: Machine, initialState: State) =

  if machine.scheduled.isNil:
    machine.scheduled = newAsyncQueue[Event]()
  machine.scheduling = machine.scheduler()

  machine.started = true
  machine.scheduler()
    .track(machine)
    .catch((err: ref CatchableError) =>
      error("Error in scheduler", error = err.msg)
    )
  machine.schedule(Event.transition(machine.state, initialState))

proc stop*(machine: Machine) =
proc stop*(machine: Machine) {.async.} =
  if not machine.started:
    return

  if not machine.scheduling.isNil:
    machine.scheduling.cancel()
  if not machine.running.isNil:
    machine.running.cancel()
  machine.started = false
  await machine.trackedFutures.cancelTracked()

  machine.state = nil
  machine.started = false

@ -12,12 +12,14 @@ push: {.upraises: [].}

import pkg/chronicles
import pkg/questionable/results
import pkg/libp2p
import pkg/libp2p/crypto/crypto

import ./fileutils
import ../errors
import ../rng

export crypto

type
  CodexKeyError = object of CodexError
  CodexKeyUnsafeError = object of CodexKeyError
@ -37,7 +39,6 @@ proc setupKey*(path: string): ?!PrivateKey =
    warn "The network private key file is not safe, aborting"
    return failure newException(
      CodexKeyUnsafeError, "The network private key file is not safe")

  return PrivateKey.init(
    ? path.readAllBytes().mapFailure(CodexKeyError))
    .mapFailure(CodexKeyError)

  let kb = ? path.readAllBytes().mapFailure(CodexKeyError)
  return PrivateKey.init(kb).mapFailure(CodexKeyError)

207
codex/utils/then.nim
Normal file
@ -0,0 +1,207 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises

# Similar to JavaScript's Promise API, `.then` and `.catch` can be used to
# handle results and errors of async `Futures` within a synchronous closure.
# They can be used as an alternative to `asyncSpawn`, which does not return a
# value and will raise a `FutureDefect` if there are unhandled errors
# encountered. Both `.then` and `.catch` act as callbacks that do not block the
# synchronous closure's flow.

# `.then` is called when the `Future` is successfully completed and can be
# chained as many times as desired, calling each `.then` callback in order. When
# the `Future` returns `Result[T, ref CatchableError]` (or `?!T`), the value
# called in the `.then` callback will be unpacked from the `Result` as a
# convenience. In other words, for `Future[?!T]`, the `.then` callback will take
# a single parameter `T`. See `tests/utils/testthen.nim` for more examples. To
# allow for chaining, `.then` returns its future. If the future is already
# complete, the `.then` callback will be executed immediately.

# `.catch` is called when the `Future` fails. In the case when the `Future`
# returns a `Result[T, ref CatchableError]` (or `?!T`), `.catch` will be called
# if the `Result` contains an error. If the `Future` is already failed (or
# `Future[?!T]` contains an error), the `.catch` callback will be executed
# immediately.

# `.cancelled` is called when the `Future` is cancelled. If the `Future` is
# already cancelled, the `.cancelled` callback will be executed immediately.

# More info on JavaScript's Promise API can be found at:
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise

runnableExamples:
  proc asyncProc(): Future[int] {.async.} =
    await sleepAsync(1.millis)
    return 1

  asyncProc()
    .then(proc(i: int) = echo "returned ", i)
    .catch(proc(e: ref CatchableError) = doAssert false, "will not be triggered")

  # outputs "returned 1"

  proc asyncProcWithError(): Future[int] {.async.} =
    await sleepAsync(1.millis)
    raise newException(ValueError, "some error")

  asyncProcWithError()
    .then(proc(i: int) = doAssert false, "will not be triggered")
    .catch(proc(e: ref CatchableError) = echo "errored: ", e.msg)

  # outputs "errored: some error"

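  # A hedged sketch of `.cancelled` usage (not part of the original commit;
  # `slowProc` is hypothetical, and chronos' `cancel()` is assumed to be
  # available, as it is elsewhere in this codebase):
  proc slowProc(): Future[int] {.async.} =
    await sleepAsync(1.hours)
    return 1

  let fut = slowProc()
    .cancelled(proc() = echo "was cancelled")
  fut.cancel()

  # outputs "was cancelled"
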
type
  OnSuccess*[T] = proc(val: T) {.gcsafe, upraises: [].}
  OnError* = proc(err: ref CatchableError) {.gcsafe, upraises: [].}
  OnCancelled* = proc() {.gcsafe, upraises: [].}

proc ignoreError(err: ref CatchableError) = discard
proc ignoreCancelled() = discard

template handleFinished(future: FutureBase,
                        onError: OnError,
                        onCancelled: OnCancelled) =

  if not future.finished:
    return

  if future.cancelled:
    onCancelled()
    return

  if future.failed:
    onError(future.error)
    return

proc then*(future: Future[void], onSuccess: OnSuccess[void]): Future[void] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)
    onSuccess()

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc then*[T](future: Future[T], onSuccess: OnSuccess[T]): Future[T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)

    if val =? future.read.catch:
      onSuccess(val)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc then*[T](future: Future[?!T], onSuccess: OnSuccess[T]): Future[?!T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)

    try:
      if val =? future.read:
        onSuccess(val)
    except CatchableError as e:
      ignoreError(e)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc then*(future: Future[?!void], onSuccess: OnSuccess[void]): Future[?!void] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, ignoreCancelled)

    try:
      if future.read.isOk:
        onSuccess()
    except CatchableError as e:
      ignoreError(e)
      return

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc catch*[T](future: Future[T], onError: OnError) =

  if future.isNil: return

  proc cb(udata: pointer) =
    future.handleFinished(onError, ignoreCancelled)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation

proc catch*[T](future: Future[?!T], onError: OnError) =

  if future.isNil: return

  proc cb(udata: pointer) =
    future.handleFinished(onError, ignoreCancelled)

    try:
      if err =? future.read.errorOption:
        onError(err)
    except CatchableError as e:
      onError(e)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)

  future.addCallback(cb)
  future.cancelCallback = cancellation

proc cancelled*[T](future: Future[T], onCancelled: OnCancelled): Future[T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, onCancelled)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)
      onCancelled()

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future

proc cancelled*[T](future: Future[?!T], onCancelled: OnCancelled): Future[?!T] =

  proc cb(udata: pointer) =
    future.handleFinished(ignoreError, onCancelled)

  proc cancellation(udata: pointer) =
    if not future.finished():
      future.removeCallback(cb)
      onCancelled()

  future.addCallback(cb)
  future.cancelCallback = cancellation
  return future
51
codex/utils/trackedfutures.nim
Normal file
@ -0,0 +1,51 @@
import std/sugar
import std/tables
import pkg/chronicles
import pkg/chronos
import ../utils/then

type
  TrackedFutures* = ref object
    futures: Table[uint, FutureBase]
    cancelling: bool

logScope:
  topics = "trackable futures"

proc len*(self: TrackedFutures): int = self.futures.len

proc removeFuture(self: TrackedFutures, future: FutureBase) =
  if not self.cancelling and not future.isNil:
    trace "removing tracked future"
    self.futures.del(future.id)

proc track*[T](self: TrackedFutures, fut: Future[T]): Future[T] =
  if self.cancelling:
    return fut

  trace "tracking future", id = fut.id
  self.futures[fut.id] = FutureBase(fut)

  fut
    .then((val: T) => self.removeFuture(fut))
    .cancelled(() => self.removeFuture(fut))
    .catch((e: ref CatchableError) => self.removeFuture(fut))

  return fut

proc track*[T, U](future: Future[T], self: U): Future[T] =
  ## Convenience method that allows chaining a future, eg:
  ## `await someFut().track(sales)`, where `sales` has declared a
  ## `trackedFutures` property.
  self.trackedFutures.track(future)

proc cancelTracked*(self: TrackedFutures) {.async.} =
  self.cancelling = true

  for future in self.futures.values:
    if not future.isNil and not future.finished:
      trace "cancelling tracked future", id = future.id
      await future.cancelAndWait()

  self.futures.clear()
  self.cancelling = false
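A hedged usage sketch for TrackedFutures (not part of the commit; the `work` proc is hypothetical, and the `TrackedFutures.new()` constructor used by the state machine above is assumed):

# inside some async proc:
proc work(): Future[void] {.async.} =
  # hypothetical long-running task
  await sleepAsync(10.seconds)

let tracked = TrackedFutures.new()
discard tracked.track(work())   # register the future for tracking
await tracked.cancelTracked()   # cancel and await anything still pending
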
16
config.nims
@ -1,5 +1,7 @@
import std/os

include "build.nims"

import std/os
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]

when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and
@ -11,12 +13,12 @@ when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and

  include "nimbus-build-system.paths"

if defined(release):
when defined(release):
  switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName"))
else:
  switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName"))

if defined(limitStackUsage):
when defined(limitStackUsage):
  # This limits stack usage of each individual function to 1MB - the option is
  # available on some GCC versions but not all - run with `-d:limitStackUsage`
  # and look for .su files in "./build/", "./nimcache/" or $TMPDIR that list the
@ -24,7 +26,7 @@ if defined(limitStackUsage):
  switch("passC", "-fstack-usage -Werror=stack-usage=1048576")
  switch("passL", "-fstack-usage -Werror=stack-usage=1048576")

if defined(windows):
when defined(windows):
  # https://github.com/nim-lang/Nim/pull/19891
  switch("define", "nimRawSetjmp")

@ -48,8 +50,8 @@ if defined(windows):
# engineering a more portable binary release, this should be tweaked but still
# use at least -msse2 or -msse3.

if defined(disableMarchNative):
  if defined(i386) or defined(amd64):
when defined(disableMarchNative):
  when defined(i386) or defined(amd64):
    switch("passC", "-mssse3")
elif defined(macosx) and defined(arm64):
  # Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758
@ -94,7 +96,7 @@ if not defined(macosx):
  --define:nimStackTraceOverride
  switch("import", "libbacktrace")

--define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9
switch("define", "codex_enable_proof_failures=true")

# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
switch("warning", "CaseTransition:off")

@ -78,7 +78,7 @@ This GET request will return the node's debug information. The response JSON sho

Replace `<SPR HERE>` in the next command with the string value for `spr`, returned by the first node's `debug/info` response.

Open a new terminal and run:
- Mac/Unix: `"build/codex" --data-dir="$(pwd)\Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>`
- Mac/Unix: `"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>`
- Windows: `"build/codex.exe" --data-dir="Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>`

Notice we're using a new data-dir, and we've increased each port number by one. This is needed so that the new node won't try to open ports already in use by the first node.

@ -5,7 +5,6 @@ import std/tables
import pkg/asynctest
import pkg/chronos

import pkg/libp2p
import pkg/libp2p/errors

import pkg/codex/rng

@ -5,7 +5,6 @@ import std/tables
import pkg/asynctest

import pkg/chronos
import pkg/libp2p

import pkg/codex/rng
import pkg/codex/stores

@ -5,9 +5,6 @@ import pkg/asynctest
import pkg/chronos
import pkg/stew/byteutils

import pkg/libp2p
import pkg/libp2p/errors

import pkg/codex/rng
import pkg/codex/stores
import pkg/codex/blockexchange

@ -5,9 +5,9 @@ import std/algorithm
import pkg/stew/byteutils
import pkg/asynctest
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/errors
import pkg/libp2p/routing_record
import pkg/libp2pdht/discv5/protocol as discv5
import pkg/codexdht/discv5/protocol as discv5

import pkg/codex/rng
import pkg/codex/blockexchange

@ -1,6 +1,5 @@
import pkg/asynctest
import pkg/chronos
import pkg/libp2p

import pkg/codex/blockexchange/protobuf/presence
import ../../examples

@ -3,8 +3,6 @@ import std/tables

import pkg/asynctest
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/errors

import pkg/codex/rng
import pkg/codex/chunker

@ -3,7 +3,6 @@ import std/algorithm

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils

import pkg/codex/blocktype as bt

@ -14,6 +14,8 @@ import ../checktest

export randomchunker, nodeutils, mockdiscovery, eventually, checktest, manifest

export libp2p except setup, eventually

# NOTE: The meaning of equality for blocks
# is changed here, because blocks are now `ref`
# types. This is only in tests!!!

@ -1,6 +1,6 @@
import pkg/chronos

template eventually*(condition: untyped, timeout = 5.seconds): bool =
template eventuallyCheck*(condition: untyped, timeout = 5.seconds): bool =
  proc loop: Future[bool] {.async.} =
    let start = Moment.now()
    while true:
@ -11,3 +11,15 @@ template eventually*(condition: untyped, timeout = 5.seconds): bool =
      else:
        await sleepAsync(1.millis)
  await loop()

template always*(condition: untyped, timeout = 50.millis): bool =
  proc loop: Future[bool] {.async.} =
    let start = Moment.now()
    while true:
      if not condition:
        return false
      if Moment.now() > (start + timeout):
        return true
      else:
        await sleepAsync(1.millis)
  await loop()

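A hedged usage sketch contrasting the two helpers (the `itemsProcessed` name and asynctest's `check` macro are borrowed from the test suites later in this diff): `eventuallyCheck` passes as soon as the condition becomes true within its timeout, while `always` passes only if the condition stays true for the entire window.

# inside an asynctest suite body:
check eventuallyCheck itemsProcessed.len == 4   # waits up to 5s for it to hold
check always (itemsProcessed.len == 0)          # must hold for the full 50ms
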
@ -2,6 +2,8 @@ import std/sequtils
import std/tables
import std/hashes
import std/sets
import std/sugar
import pkg/questionable
import pkg/codex/market
import pkg/codex/contracts/requests
import pkg/codex/contracts/config
@ -53,7 +55,7 @@ type
    callback: OnRequest
  FulfillmentSubscription* = ref object of Subscription
    market: MockMarket
    requestId: RequestId
    requestId: ?RequestId
    callback: OnFulfillment
  SlotFilledSubscription* = ref object of Subscription
    market: MockMarket
@ -65,11 +67,11 @@ type
    callback: OnSlotFreed
  RequestCancelledSubscription* = ref object of Subscription
    market: MockMarket
    requestId: RequestId
    requestId: ?RequestId
    callback: OnRequestCancelled
  RequestFailedSubscription* = ref object of Subscription
    market: MockMarket
    requestId: RequestId
    requestId: ?RequestId
    callback: OnRequestCancelled
  ProofSubmittedSubscription = ref object of Subscription
    market: MockMarket
@ -83,7 +85,7 @@ proc hash*(requestId: RequestId): Hash =

proc new*(_: type MockMarket): MockMarket =
  ## Create a new mocked Market instance
  ##
  ##
  let config = MarketplaceConfig(
    collateral: CollateralConfig(
      repairRewardPercentage: 10,
@ -112,7 +114,9 @@ method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} =
  market.requested.add(request)
  var subscriptions = market.subscriptions.onRequest
  for subscription in subscriptions:
    subscription.callback(request.id, request.ask)
    subscription.callback(request.id,
                          request.ask,
                          request.expiry)

method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} =
  return market.activeRequests[market.signer]
@ -173,28 +177,32 @@ proc emitSlotFilled*(market: MockMarket,
    if requestMatches and slotMatches:
      subscription.callback(requestId, slotIndex)

proc emitSlotFreed*(market: MockMarket, slotId: SlotId) =
proc emitSlotFreed*(market: MockMarket,
                    requestId: RequestId,
                    slotIndex: UInt256) =
  var subscriptions = market.subscriptions.onSlotFreed
  for subscription in subscriptions:
    subscription.callback(slotId)
    subscription.callback(requestId, slotIndex)

proc emitRequestCancelled*(market: MockMarket,
                           requestId: RequestId) =
proc emitRequestCancelled*(market: MockMarket, requestId: RequestId) =
  var subscriptions = market.subscriptions.onRequestCancelled
  for subscription in subscriptions:
    if subscription.requestId == requestId:
    if subscription.requestId == requestId.some or
       subscription.requestId.isNone:
      subscription.callback(requestId)

proc emitRequestFulfilled*(market: MockMarket, requestId: RequestId) =
  var subscriptions = market.subscriptions.onFulfillment
  for subscription in subscriptions:
    if subscription.requestId == requestId:
    if subscription.requestId == requestId.some or
       subscription.requestId.isNone:
      subscription.callback(requestId)

proc emitRequestFailed*(market: MockMarket, requestId: RequestId) =
  var subscriptions = market.subscriptions.onRequestFailed
  for subscription in subscriptions:
    if subscription.requestId == requestId:
    if subscription.requestId == requestId.some or
       subscription.requestId.isNone:
      subscription.callback(requestId)

proc fillSlot*(market: MockMarket,
@ -221,7 +229,12 @@ method fillSlot*(market: MockMarket,

method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} =
  market.freed.add(slotId)
  market.emitSlotFreed(slotId)
  for s in market.filled:
    if slotId(s.requestId, s.slotIndex) == slotId:
      market.emitSlotFreed(s.requestId, s.slotIndex)
      break
  market.slotState[slotId] = SlotState.Free


method withdrawFunds*(market: MockMarket,
                      requestId: RequestId) {.async.} =
@ -281,13 +294,24 @@ method subscribeRequests*(market: MockMarket,
  market.subscriptions.onRequest.add(subscription)
  return subscription

method subscribeFulfillment*(market: MockMarket,
                             callback: OnFulfillment):
                            Future[Subscription] {.async.} =
  let subscription = FulfillmentSubscription(
    market: market,
    requestId: none RequestId,
    callback: callback
  )
  market.subscriptions.onFulfillment.add(subscription)
  return subscription

method subscribeFulfillment*(market: MockMarket,
                             requestId: RequestId,
                             callback: OnFulfillment):
                            Future[Subscription] {.async.} =
  let subscription = FulfillmentSubscription(
    market: market,
    requestId: requestId,
    requestId: some requestId,
    callback: callback
  )
  market.subscriptions.onFulfillment.add(subscription)
@ -321,25 +345,47 @@ method subscribeSlotFreed*(market: MockMarket,
  market.subscriptions.onSlotFreed.add(subscription)
  return subscription

method subscribeRequestCancelled*(market: MockMarket,
                                  callback: OnRequestCancelled):
                                 Future[Subscription] {.async.} =
  let subscription = RequestCancelledSubscription(
    market: market,
    requestId: none RequestId,
    callback: callback
  )
  market.subscriptions.onRequestCancelled.add(subscription)
  return subscription

method subscribeRequestCancelled*(market: MockMarket,
                                  requestId: RequestId,
                                  callback: OnRequestCancelled):
                                 Future[Subscription] {.async.} =
  let subscription = RequestCancelledSubscription(
    market: market,
    requestId: requestId,
    requestId: some requestId,
    callback: callback
  )
  market.subscriptions.onRequestCancelled.add(subscription)
  return subscription

method subscribeRequestFailed*(market: MockMarket,
                               callback: OnRequestFailed):
                              Future[Subscription] {.async.} =
  let subscription = RequestFailedSubscription(
    market: market,
    requestId: none RequestId,
    callback: callback
  )
  market.subscriptions.onRequestFailed.add(subscription)
  return subscription

method subscribeRequestFailed*(market: MockMarket,
                               requestId: RequestId,
                               callback: OnRequestFailed):
                              Future[Subscription] {.async.} =
  let subscription = RequestFailedSubscription(
    market: market,
    requestId: requestId,
    requestId: some requestId,
    callback: callback
  )
  market.subscriptions.onRequestFailed.add(subscription)
@ -355,6 +401,17 @@ method subscribeProofSubmission*(mock: MockMarket,
  mock.subscriptions.onProofSubmitted.add(subscription)
  return subscription

method queryPastStorageRequests*(market: MockMarket,
                                 blocksAgo: int):
                                Future[seq[PastStorageRequest]] {.async.} =
  # MockMarket does not have the concept of blocks, so simply return all
  # previous events
  return market.requested.map(request =>
    PastStorageRequest(requestId: request.id,
                       ask: request.ask,
                       expiry: request.expiry)
  )

method unsubscribe*(subscription: RequestSubscription) {.async.} =
  subscription.market.subscriptions.onRequest.keepItIf(it != subscription)


16
tests/codex/helpers/mocksalesagent.nim
Normal file
@ -0,0 +1,16 @@
import pkg/codex/sales/salesagent

type
  MockSalesAgent = ref object of SalesAgent
    fulfilledCalled*: bool
    failedCalled*: bool
    slotFilledCalled*: bool

method onFulfilled*(agent: SalesAgent, requestId: RequestId) =
  fulfilledCalled = true

method onFailed*(agent: SalesAgent, requestId: RequestId) =
  failedCalled = true

method onSlotFilled*(agent: SalesAgent, requestId: RequestId, slotIndex: UInt256) {.base.} =
  slotFilledCalled = true
@ -2,6 +2,7 @@ import std/sequtils

import pkg/chronos
import pkg/libp2p
import pkg/libp2p/errors

import pkg/codex/discovery
import pkg/codex/stores

108
tests/codex/merkletree/testmerkletree.nim
Normal file
@ -0,0 +1,108 @@
import std/unittest
import std/bitops
import std/random
import std/sequtils
import pkg/libp2p
import codex/merkletree/merkletree
import ../helpers
import pkg/questionable/results

checksuite "merkletree":
  const sha256 = multiCodec("sha2-256")
  const sha512 = multiCodec("sha2-512")

  proc randomHash(codec: MultiCodec = sha256): MerkleHash =
    var data: array[0..31, byte]
    for i in 0..31:
      data[i] = rand(uint8)
    return MultiHash.digest($codec, data).tryGet()

  proc combine(a, b: MerkleHash, codec: MultiCodec = sha256): MerkleHash =
    var buf = newSeq[byte](a.size + b.size)
    for i in 0..<a.size:
      buf[i] = a.data.buffer[i]
    for i in 0..<b.size:
      buf[i + a.size] = b.data.buffer[i]
    return MultiHash.digest($codec, buf).tryGet()

  var
    leaves: array[0..10, MerkleHash]

  setup:
    for i in 0..leaves.high:
      leaves[i] = randomHash()

  test "tree with one leaf has expected root":
    let tree = MerkleTree.init(leaves[0..0]).tryGet()

    check:
      tree.leaves == leaves[0..0]
      tree.root == leaves[0]
      tree.len == 1

  test "tree with two leaves has expected root":
    let
      expectedRoot = combine(leaves[0], leaves[1])

    let tree = MerkleTree.init(leaves[0..1]).tryGet()

    check:
      tree.leaves == leaves[0..1]
      tree.len == 3
      tree.root == expectedRoot

  test "tree with three leaves has expected root":
    let
      expectedRoot = combine(combine(leaves[0], leaves[1]), combine(leaves[2], leaves[2]))

    let tree = MerkleTree.init(leaves[0..2]).tryGet()

    check:
      tree.leaves == leaves[0..2]
      tree.len == 6
      tree.root == expectedRoot

  test "tree with two leaves provides expected proofs":
    let tree = MerkleTree.init(leaves[0..1]).tryGet()

    let expectedProofs = [
      MerkleProof.init(0, @[leaves[1]]),
      MerkleProof.init(1, @[leaves[0]]),
    ]

    check:
      tree.getProof(0).tryGet() == expectedProofs[0]
      tree.getProof(1).tryGet() == expectedProofs[1]

  test "tree with three leaves provides expected proofs":
    let tree = MerkleTree.init(leaves[0..2]).tryGet()

    let expectedProofs = [
      MerkleProof.init(0, @[leaves[1], combine(leaves[2], leaves[2])]),
      MerkleProof.init(1, @[leaves[0], combine(leaves[2], leaves[2])]),
      MerkleProof.init(2, @[leaves[2], combine(leaves[0], leaves[1])]),
    ]

    check:
      tree.getProof(0).tryGet() == expectedProofs[0]
      tree.getProof(1).tryGet() == expectedProofs[1]
      tree.getProof(2).tryGet() == expectedProofs[2]

  test "getProof fails for index out of bounds":
    let tree = MerkleTree.init(leaves[0..3]).tryGet()

    check:
      isErr(tree.getProof(-1))
      isErr(tree.getProof(4))

  test "can create MerkleTree directly from root hash":
    let tree = MerkleTree.init(leaves[0], 1)

    check:
      tree.root == leaves[0]

  test "cannot create MerkleTree from leaves with different codec":
    let res = MerkleTree.init(@[randomHash(sha256), randomHash(sha512)])

    check:
      isErr(res)
29
tests/codex/sales/states/testpreparing.nim
Normal file
@ -0,0 +1,29 @@
import std/unittest
import pkg/questionable
import pkg/codex/contracts/requests
import pkg/codex/sales/states/downloading
import pkg/codex/sales/states/cancelled
import pkg/codex/sales/states/failed
import pkg/codex/sales/states/filled
import ../../examples

suite "sales state 'preparing'":

  let request = StorageRequest.example
  let slotIndex = (request.ask.slots div 2).u256
  var state: SalePreparing

  setup:
    state = SalePreparing.new()

  test "switches to cancelled state when request expires":
    let next = state.onCancelled(request)
    check !next of SaleCancelled

  test "switches to failed state when request fails":
    let next = state.onFailed(request)
    check !next of SaleFailed

  test "switches to filled state when slot is filled":
    let next = state.onSlotFilled(request.id, slotIndex)
    check !next of SaleFilled
@ -11,6 +11,7 @@ import pkg/codex/sales
import pkg/codex/sales/salesdata
import pkg/codex/sales/salescontext
import pkg/codex/sales/reservations
import pkg/codex/sales/slotqueue
import pkg/codex/stores/repostore
import pkg/codex/proving
import pkg/codex/blocktype as bt
@ -21,10 +22,9 @@ import ../helpers/eventually
import ../examples
import ./helpers

asyncchecksuite "Sales":
asyncchecksuite "Sales - start":
  let proof = exampleProof()

  var availability: Availability
  var request: StorageRequest
  var sales: Sales
  var market: MockMarket
@ -32,14 +32,10 @@ asyncchecksuite "Sales":
  var proving: Proving
  var reservations: Reservations
  var repo: RepoStore
  var queue: SlotQueue
  var itemsProcessed: seq[SlotQueueItem]

  setup:
    availability = Availability.init(
      size=100.u256,
      duration=60.u256,
      minPrice=600.u256,
      maxCollateral=400.u256
    )
    request = StorageRequest(
      ask: StorageAsk(
        slots: 4,
@ -67,22 +63,222 @@ asyncchecksuite "Sales":
                         slot: UInt256,
                         onBatch: BatchProc): Future[?!void] {.async.} =
      return success()
    queue = sales.context.slotQueue
    proving.onProve = proc(slot: Slot): Future[seq[byte]] {.async.} =
      return proof
    itemsProcessed = @[]
    request.expiry = (clock.now() + 42).u256

  teardown:
    await sales.stop()
    await repo.stop()

  proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} =
    let address = await market.getSigner()
    let slot = MockSlot(requestId: request.id,
                        slotIndex: slotIdx,
                        proof: proof,
                        host: address)
    market.filled.add slot
    market.slotState[slotId(request.id, slotIdx)] = SlotState.Filled

  test "load slots when Sales module starts":
    let me = await market.getSigner()

    request.ask.slots = 2
    market.requested = @[request]
    market.requestState[request.id] = RequestState.New

    let slot0 = MockSlot(requestId: request.id,
                         slotIndex: 0.u256,
                         proof: proof,
                         host: me)
    await fillSlot(slot0.slotIndex)

    let slot1 = MockSlot(requestId: request.id,
                         slotIndex: 1.u256,
                         proof: proof,
                         host: me)
    await fillSlot(slot1.slotIndex)

    market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)]
    market.requested = @[request]
    market.activeRequests[me] = @[request.id]

    await sales.start()

    check eventually sales.agents.len == 2
    check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256)
    check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256)

asyncchecksuite "Sales":
|
||||
let proof = exampleProof()
|
||||
|
||||
var availability: Availability
|
||||
var request: StorageRequest
|
||||
var sales: Sales
|
||||
var market: MockMarket
|
||||
var clock: MockClock
|
||||
var proving: Proving
|
||||
var reservations: Reservations
|
||||
var repo: RepoStore
|
||||
var queue: SlotQueue
|
||||
var itemsProcessed: seq[SlotQueueItem]
|
||||
|
||||
setup:
|
||||
availability = Availability.init(
|
||||
size=100.u256,
|
||||
duration=60.u256,
|
||||
minPrice=600.u256,
|
||||
maxCollateral=400.u256
|
||||
)
|
||||
request = StorageRequest(
|
||||
ask: StorageAsk(
|
||||
slots: 4,
|
||||
slotSize: 100.u256,
|
||||
duration: 60.u256,
|
||||
reward: 10.u256,
|
||||
collateral: 200.u256,
|
||||
),
|
||||
content: StorageContent(
|
||||
cid: "some cid"
|
||||
),
|
||||
expiry: (getTime() + initDuration(hours=1)).toUnix.u256
|
||||
)
|
||||
|
||||
market = MockMarket.new()
|
||||
|
||||
let me = await market.getSigner()
|
||||
market.activeSlots[me] = @[]
|
||||
|
||||
clock = MockClock.new()
|
||||
proving = Proving.new()
|
||||
let repoDs = SQLiteDatastore.new(Memory).tryGet()
|
||||
let metaDs = SQLiteDatastore.new(Memory).tryGet()
|
||||
repo = RepoStore.new(repoDs, metaDs)
|
||||
await repo.start()
|
||||
sales = Sales.new(market, clock, proving, repo)
|
||||
reservations = sales.context.reservations
|
||||
sales.onStore = proc(request: StorageRequest,
|
||||
slot: UInt256,
|
||||
onBatch: BatchProc): Future[?!void] {.async.} =
|
||||
return success()
|
||||
queue = sales.context.slotQueue
|
||||
proving.onProve = proc(slot: Slot): Future[seq[byte]] {.async.} =
|
||||
return proof
|
||||
await sales.start()
|
||||
request.expiry = (clock.now() + 42).u256
|
||||
itemsProcessed = @[]
|
||||
|
||||
teardown:
|
||||
await repo.stop()
|
||||
await sales.stop()
|
||||
await repo.stop()
|
||||
|
||||
proc getAvailability: ?!Availability =
|
||||
waitFor reservations.get(availability.id)
|
||||
|
||||
proc wasIgnored: Future[bool] {.async.} =
|
||||
return
|
||||
eventually sales.agents.len == 1 and # agent created at first
|
||||
eventually sales.agents.len == 0 # then removed once ignored
|
||||
proc notProcessed(itemsProcessed: seq[SlotQueueItem],
|
||||
request: StorageRequest): bool =
|
||||
let items = SlotQueueItem.init(request)
|
||||
for i in 0..<items.len:
|
||||
if itemsProcessed.contains(items[i]):
|
||||
return false
|
||||
return true
|
||||
|
||||
proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} =
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
await sleepAsync(10.millis)
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateral = request.ask.collateral + 1
|
||||
discard await reservations.reserve(availability)
|
||||
await market.requestStorage(request)
|
||||
await market.requestStorage(request1)
|
||||
await sleepAsync(5.millis) # wait for request slots to be added to queue
|
||||
return request1
|
||||
|
||||
test "processes all request's slots once StorageRequested emitted":
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
check isOk await reservations.reserve(availability)
|
||||
await market.requestStorage(request)
|
||||
let items = SlotQueueItem.init(request)
|
||||
check eventually items.allIt(itemsProcessed.contains(it))
|
||||
|
||||
test "removes slots from slot queue once RequestCancelled emitted":
|
||||
let request1 = await addRequestToSaturatedQueue()
|
||||
market.emitRequestCancelled(request1.id)
|
||||
check always itemsProcessed.notProcessed(request1)
|
||||
|
||||
test "removes request from slot queue once RequestFailed emitted":
|
||||
let request1 = await addRequestToSaturatedQueue()
|
||||
market.emitRequestFailed(request1.id)
|
||||
check always itemsProcessed.notProcessed(request1)
|
||||
|
||||
test "removes request from slot queue once RequestFulfilled emitted":
|
||||
let request1 = await addRequestToSaturatedQueue()
|
||||
market.emitRequestFulfilled(request1.id)
|
||||
check always itemsProcessed.notProcessed(request1)
|
||||
|
||||
test "removes slot index from slot queue once SlotFilled emitted":
|
||||
let request1 = await addRequestToSaturatedQueue()
|
||||
market.emitSlotFilled(request1.id, 1.u256)
|
||||
let expected = SlotQueueItem.init(request1, 1'u16)
|
||||
check always (not itemsProcessed.contains(expected))
|
||||
|
||||
test "adds slot index to slot queue once SlotFreed emitted":
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
|
||||
check isOk await reservations.reserve(availability)
|
||||
market.requested.add request # "contract" must be able to return request
|
||||
market.emitSlotFreed(request.id, 2.u256)
|
||||
|
||||
let expected = SlotQueueItem.init(request, 2.uint16)
|
||||
check eventually itemsProcessed.contains(expected)
|
||||
|
||||
test "request slots are not added to the slot queue when no availabilities exist":
|
||||
var itemsProcessed: seq[SlotQueueItem] = @[]
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
|
||||
await market.requestStorage(request)
|
||||
# check that request was ignored due to no matching availability
|
||||
check always itemsProcessed.len == 0
|
||||
|
||||
test "non-matching availabilities/requests are not added to the slot queue":
|
||||
var itemsProcessed: seq[SlotQueueItem] = @[]
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
|
||||
let nonMatchingAvailability = Availability.init(
|
||||
size=100.u256,
|
||||
duration=60.u256,
|
||||
minPrice=601.u256, # too high
|
||||
maxCollateral=400.u256
|
||||
)
|
||||
check isOk await reservations.reserve(nonMatchingAvailability)
|
||||
await market.requestStorage(request)
|
||||
# check that request was ignored due to no matching availability
|
||||
check always itemsProcessed.len == 0
|
||||
|
||||
test "adds past requests to queue once availability added":
|
||||
var itemsProcessed: seq[SlotQueueItem] = @[]
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
|
||||
await market.requestStorage(request)
|
||||
|
||||
# now add matching availability
|
||||
check isOk await reservations.reserve(availability)
|
||||
check eventuallyCheck itemsProcessed.len == request.ask.slots.int
|
||||
|
||||
test "makes storage unavailable when downloading a matched request":
|
||||
var used = false
|
||||
@ -115,19 +311,19 @@ asyncchecksuite "Sales":
    availability.duration = request.ask.duration - 1
    check isOk await reservations.reserve(availability)
    await market.requestStorage(request)
    check await wasIgnored()
    check getAvailability().?size == success availability.size

  test "ignores request when slot size is too small":
    availability.size = request.ask.slotSize - 1
    check isOk await reservations.reserve(availability)
    await market.requestStorage(request)
    check await wasIgnored()
    check getAvailability().?size == success availability.size

  test "ignores request when reward is too low":
    availability.minPrice = request.ask.pricePerSlot + 1
    check isOk await reservations.reserve(availability)
    await market.requestStorage(request)
    check await wasIgnored()
    check getAvailability().?size == success availability.size

  test "availability remains unused when request is ignored":
    availability.minPrice = request.ask.pricePerSlot + 1
@ -140,7 +336,16 @@ asyncchecksuite "Sales":
    tooBigCollateral.ask.collateral = availability.maxCollateral + 1
    check isOk await reservations.reserve(availability)
    await market.requestStorage(tooBigCollateral)
    check await wasIgnored()
    check getAvailability().?size == success availability.size

  test "ignores request when slot state is not free":
    check isOk await reservations.reserve(availability)
    await market.requestStorage(request)
    market.slotState[request.slotId(0.u256)] = SlotState.Filled
    market.slotState[request.slotId(1.u256)] = SlotState.Filled
    market.slotState[request.slotId(2.u256)] = SlotState.Filled
    market.slotState[request.slotId(3.u256)] = SlotState.Filled
    check getAvailability().?size == success availability.size

  test "retrieves and stores data locally":
    var storingRequest: StorageRequest
@ -195,7 +400,7 @@ asyncchecksuite "Sales":
  test "fills a slot":
    check isOk await reservations.reserve(availability)
    await market.requestStorage(request)
    check eventually market.filled.len == 1
    check eventuallyCheck market.filled.len == 1
    check market.filled[0].requestId == request.id
    check market.filled[0].slotIndex < request.ask.slots.u256
    check market.filled[0].proof == proof
@ -265,7 +470,7 @@ asyncchecksuite "Sales":
    check proving.slots.len == 0
    check isOk await reservations.reserve(availability)
    await market.requestStorage(request)
    check eventually proving.slots.len == 1
    check eventuallyCheck proving.slots.len == 1
    check proving.slots.contains(Slot(request: request, slotIndex: soldSlotIndex))

  test "loads active slots from market":
@ -300,16 +505,7 @@ asyncchecksuite "Sales":
    market.activeRequests[me] = @[request.id]

    await sales.load()
    let expected = SalesData(requestId: request.id, request: some request)
    # because sales.load() calls agent.start, we won't know the slotIndex
    # randomly selected for the agent, and we also won't know the value of
    # `failed`/`fulfilled`/`cancelled` futures, so we need to compare
    # the properties we know
    # TODO: when calling sales.load(), slot index should be restored and not
    # randomly re-assigned, so this may no longer be needed
    proc `==` (data0, data1: SalesData): bool =
      return data0.requestId == data1.requestId and
        data0.request == data1.request

    check eventually sales.agents.len == 2
    check sales.agents.all(agent => agent.data == expected)
    check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256)
    check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256)

@ -1,6 +1,3 @@
import std/sets
import std/sequtils
import std/sugar
import std/times
import pkg/asynctest
import pkg/chronos
@ -13,6 +10,7 @@ import pkg/codex/proving
import ../helpers/mockmarket
import ../helpers/mockclock
import ../helpers/eventually
import ../helpers
import ../examples

var onCancelCalled = false
@ -25,6 +23,7 @@ type
  MockErrorState = ref object of ErrorHandlingState

method `$`*(state: MockState): string = "MockState"
method `$`*(state: MockErrorState): string = "MockErrorState"

method onCancelled*(state: MockState, request: StorageRequest): ?State =
  onCancelCalled = true
@ -88,45 +87,26 @@ asyncchecksuite "Sales agent":
    await agent.retrieveRequest()
    check agent.data.request == some request

  test "subscribe assigns subscriptions/futures":
  test "subscribe assigns cancelled future":
    await agent.subscribe()
    check not agent.data.cancelled.isNil
    check not agent.data.failed.isNil
    check not agent.data.fulfilled.isNil
    check not agent.data.slotFilled.isNil

  test "unsubscribe deassigns subscriptions/futures":
  test "unsubscribe deassigns cancelled future":
    await agent.subscribe()
    await agent.unsubscribe()
    check agent.data.cancelled.isNil
    check agent.data.failed.isNil
    check agent.data.fulfilled.isNil
    check agent.data.slotFilled.isNil

  test "subscribe can be called multiple times, without overwriting subscriptions/futures":
    await agent.subscribe()
    let cancelled = agent.data.cancelled
    let failed = agent.data.failed
    let fulfilled = agent.data.fulfilled
    let slotFilled = agent.data.slotFilled
    await agent.subscribe()
    check cancelled == agent.data.cancelled
    check failed == agent.data.failed
    check fulfilled == agent.data.fulfilled
    check slotFilled == agent.data.slotFilled

  test "unsubscribe can be called multiple times":
    await agent.subscribe()
    await agent.unsubscribe()
    await agent.unsubscribe()

  test "subscribe can be called when request expiry has lapsed":
    # succeeds when agent.data.fulfilled.isNil
    request.expiry = (getTime() - initDuration(seconds=1)).toUnix.u256
    agent.data.request = some request
    check agent.data.fulfilled.isNil
    await agent.subscribe()

  test "current state onCancelled called when cancel emitted":
    let state = MockState.new()
    agent.start(state)
@ -134,22 +114,20 @@ asyncchecksuite "Sales agent":
|
||||
clock.set(request.expiry.truncate(int64))
|
||||
check eventually onCancelCalled
|
||||
|
||||
test "cancelled future is finished (cancelled) when fulfillment emitted":
|
||||
test "cancelled future is finished (cancelled) when onFulfilled called":
|
||||
agent.start(MockState.new())
|
||||
await agent.subscribe()
|
||||
market.emitRequestFulfilled(request.id)
|
||||
agent.onFulfilled(request.id)
|
||||
check eventually agent.data.cancelled.cancelled()
|
||||
|
||||
test "current state onFailed called when failed emitted":
|
||||
test "current state onFailed called when onFailed called":
|
||||
agent.start(MockState.new())
|
||||
await agent.subscribe()
|
||||
market.emitRequestFailed(request.id)
|
||||
agent.onFailed(request.id)
|
||||
check eventually onFailedCalled
|
||||
|
||||
test "current state onSlotFilled called when slot filled emitted":
|
||||
agent.start(MockState.new())
|
||||
await agent.subscribe()
|
||||
market.emitSlotFilled(request.id, slotIndex)
|
||||
agent.onSlotFilled(request.id, slotIndex)
|
||||
check eventually onSlotFilledCalled
|
||||
|
||||
test "ErrorHandlingState.onError can be overridden at the state level":
|
||||
|
||||
451
tests/codex/sales/testslotqueue.nim
Normal file
@@ -0,0 +1,451 @@
import std/sequtils
import pkg/asynctest
import pkg/chronicles
import pkg/chronos
import pkg/datastore
import pkg/questionable
import pkg/questionable/results

import pkg/codex/sales/reservations
import pkg/codex/sales/slotqueue
import pkg/codex/stores

import ../helpers
import ../helpers/mockmarket
import ../helpers/eventually
import ../examples

suite "Slot queue start/stop":

  var repo: RepoStore
  var repoDs: Datastore
  var metaDs: SQLiteDatastore
  var reservations: Reservations
  var queue: SlotQueue

  setup:
    repoDs = SQLiteDatastore.new(Memory).tryGet()
    metaDs = SQLiteDatastore.new(Memory).tryGet()
    repo = RepoStore.new(repoDs, metaDs)
    reservations = Reservations.new(repo)
    queue = SlotQueue.new(reservations)

  teardown:
    await queue.stop()

  test "starts out not running":
    check not queue.running

  test "can call start multiple times, and when already running":
    asyncSpawn queue.start()
    asyncSpawn queue.start()
    check queue.running

  test "can call stop when already stopped":
    await queue.stop()
    check not queue.running

  test "can call stop when running":
    asyncSpawn queue.start()
    await queue.stop()
    check not queue.running

  test "can call stop multiple times":
    asyncSpawn queue.start()
    await queue.stop()
    await queue.stop()
    check not queue.running

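The start/stop tests above pin down idempotency: calling start or stop repeatedly, or in the wrong order, must be harmless. A minimal sketch of that pattern (an assumption about shape; the real loop lives in codex/sales/slotqueue):

import pkg/chronos

# Toy idempotent start/stop: a `running` flag guards the dispatch loop, so
# repeated starts spawn nothing new and repeated stops are no-ops.
type Service = ref object
  running: bool

proc start(svc: Service) {.async.} =
  if svc.running:
    return                        # already running: second start is a no-op
  svc.running = true
  while svc.running:
    await sleepAsync(10.millis)   # stand-in for dequeueing work

proc stop(svc: Service) {.async.} =
  svc.running = false             # safe to call when already stopped
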
suite "Slot queue workers":
|
||||
|
||||
var repo: RepoStore
|
||||
var repoDs: Datastore
|
||||
var metaDs: SQLiteDatastore
|
||||
var availability: Availability
|
||||
var reservations: Reservations
|
||||
var queue: SlotQueue
|
||||
|
||||
proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} =
|
||||
await sleepAsync(1000.millis)
|
||||
# this is not illustrative of the realistic scenario as the
|
||||
# `doneProcessing` future would be passed to another context before being
|
||||
# completed and therefore is not as simple as making the callback async
|
||||
doneProcessing.complete()
|
||||
|
||||
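The comment in onProcessSlot hints at the realistic flow: the callback hands `doneProcessing` off to another component, which completes it later from its own async context. A hypothetical illustration (the proc names here are invented, not part of this repo):

# Sketch of the hand-off the comment above describes: the worker callback
# forwards `doneProcessing` instead of completing it inline.
proc fillSlotElsewhere(done: Future[void]) {.async.} =
  await sleepAsync(50.millis)   # e.g. downloading data and filling the slot
  done.complete()               # completed far from the original callback

proc onProcessSlotHandoff(item: SlotQueueItem,
                          doneProcessing: Future[void]) {.async.} =
  asyncSpawn fillSlotElsewhere(doneProcessing)  # hand off; do not complete here
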
  setup:
    let request = StorageRequest.example
    repoDs = SQLiteDatastore.new(Memory).tryGet()
    metaDs = SQLiteDatastore.new(Memory).tryGet()
    let quota = request.ask.slotSize.truncate(uint) * 100 + 1
    repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = quota)
    reservations = Reservations.new(repo)
    # create an availability that should always match
    availability = Availability.init(
      size = request.ask.slotSize * 100,
      duration = request.ask.duration * 100,
      minPrice = request.ask.pricePerSlot div 100,
      maxCollateral = request.ask.collateral * 100
    )
    queue = SlotQueue.new(reservations, maxSize = 5, maxWorkers = 3)
    queue.onProcessSlot = onProcessSlot
    discard await reservations.reserve(availability)

  proc startQueue = asyncSpawn queue.start()

  teardown:
    await queue.stop()

  test "activeWorkers should be 0 when not running":
    check queue.activeWorkers == 0

  test "maxWorkers cannot be 0":
    expect ValueError:
      discard SlotQueue.new(reservations, maxSize = 1, maxWorkers = 0)

  test "maxWorkers cannot surpass maxSize":
    expect ValueError:
      discard SlotQueue.new(reservations, maxSize = 1, maxWorkers = 2)

  test "does not surpass max workers":
    startQueue()
    let item1 = SlotQueueItem.example
    let item2 = SlotQueueItem.example
    let item3 = SlotQueueItem.example
    let item4 = SlotQueueItem.example
    check (await queue.push(item1)).isOk
    check (await queue.push(item2)).isOk
    check (await queue.push(item3)).isOk
    check (await queue.push(item4)).isOk
    check eventually queue.activeWorkers == 3

  test "discards workers once processing completed":
    proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} =
      await sleepAsync(1.millis)
      done.complete()

    queue.onProcessSlot = processSlot

    startQueue()
    let item1 = SlotQueueItem.example
    let item2 = SlotQueueItem.example
    let item3 = SlotQueueItem.example
    let item4 = SlotQueueItem.example
    check (await queue.push(item1)).isOk # finishes after 1.millis
    check (await queue.push(item2)).isOk # finishes after 1.millis
    check (await queue.push(item3)).isOk # finishes after 1.millis
    check (await queue.push(item4)).isOk
    check eventually queue.activeWorkers == 1

suite "Slot queue":

  var onProcessSlotCalled = false
  var onProcessSlotCalledWith: seq[(RequestId, uint16)]
  var repo: RepoStore
  var repoDs: Datastore
  var metaDs: SQLiteDatastore
  var availability: Availability
  var reservations: Reservations
  var queue: SlotQueue
  let maxWorkers = 2
  var unpauseQueue: Future[void]
  var paused: bool

  proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) =
    queue = SlotQueue.new(reservations, maxWorkers, maxSize.uint16)
    queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
      await sleepAsync(processSlotDelay)
      trace "processing item", requestId = item.requestId, slotIndex = item.slotIndex
      onProcessSlotCalled = true
      onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
      done.complete()
    asyncSpawn queue.start()

  setup:
    onProcessSlotCalled = false
    onProcessSlotCalledWith = @[]
    let request = StorageRequest.example
    repoDs = SQLiteDatastore.new(Memory).tryGet()
    metaDs = SQLiteDatastore.new(Memory).tryGet()
    let quota = request.ask.slotSize.truncate(uint) * 100 + 1
    repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = quota)
    reservations = Reservations.new(repo)
    # create an availability that should always match
    availability = Availability.init(
      size = request.ask.slotSize * 100,
      duration = request.ask.duration * 100,
      minPrice = request.ask.pricePerSlot div 100,
      maxCollateral = request.ask.collateral * 100
    )
    discard await reservations.reserve(availability)

  teardown:
    paused = false

    await queue.stop()

  test "starts out empty":
    newSlotQueue(maxSize = 2, maxWorkers = 2)
    check queue.len == 0
    check $queue == "[]"

  test "reports correct size":
    newSlotQueue(maxSize = 2, maxWorkers = 2)
    check queue.size == 2

  test "correctly compares SlotQueueItems":
    var requestA = StorageRequest.example
    requestA.ask.duration = 1.u256
    requestA.ask.reward = 1.u256
    check requestA.ask.pricePerSlot == 1.u256
    requestA.ask.collateral = 100000.u256
    requestA.expiry = 1001.u256

    var requestB = StorageRequest.example
    requestB.ask.duration = 100.u256
    requestB.ask.reward = 1000.u256
    check requestB.ask.pricePerSlot == 100000.u256
    requestB.ask.collateral = 1.u256
    requestB.expiry = 1000.u256

    let itemA = SlotQueueItem.init(requestA, 0)
    let itemB = SlotQueueItem.init(requestB, 0)
    check itemB < itemA # B higher priority than A
    check itemA > itemB

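The two pricePerSlot checks above are consistent with pricePerSlot being duration multiplied by reward; that is an inference from the numbers (1 * 1 = 1 and 100 * 1000 = 100000), not a quote of the contracts code:

import pkg/stint

# Assumed relation behind the checks above (inferred, not quoted):
func pricePerSlotAssumed(duration, reward: UInt256): UInt256 =
  duration * reward

assert pricePerSlotAssumed(1.u256, 1.u256) == 1.u256
assert pricePerSlotAssumed(100.u256, 1000.u256) == 100000.u256
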
test "expands available all possible slot indices on init":
|
||||
let request = StorageRequest.example
|
||||
let items = SlotQueueItem.init(request)
|
||||
check items.len.uint64 == request.ask.slots
|
||||
var checked = 0
|
||||
for slotIndex in 0'u16..<request.ask.slots.uint16:
|
||||
check items.anyIt(it == SlotQueueItem.init(request, slotIndex))
|
||||
inc checked
|
||||
check checked == items.len
|
||||
|
||||
test "can process items":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
let item1 = SlotQueueItem.example
|
||||
let item2 = SlotQueueItem.example
|
||||
check (await queue.push(item1)).isOk
|
||||
check (await queue.push(item2)).isOk
|
||||
check eventually onProcessSlotCalledWith == @[
|
||||
(item1.requestId, item1.slotIndex),
|
||||
(item2.requestId, item2.slotIndex)
|
||||
]
|
||||
|
||||
test "can push items past number of maxWorkers":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
let item0 = SlotQueueItem.example
|
||||
let item1 = SlotQueueItem.example
|
||||
let item2 = SlotQueueItem.example
|
||||
let item3 = SlotQueueItem.example
|
||||
let item4 = SlotQueueItem.example
|
||||
check isOk (await queue.push(item0))
|
||||
check isOk (await queue.push(item1))
|
||||
check isOk (await queue.push(item2))
|
||||
check isOk (await queue.push(item3))
|
||||
check isOk (await queue.push(item4))
|
||||
|
||||
test "populates item with exisiting request metadata":
|
||||
newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis)
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateral += 1.u256
|
||||
let items0 = SlotQueueItem.init(request0)
|
||||
let items1 = SlotQueueItem.init(request1)
|
||||
check (await queue.push(items0)).isOk
|
||||
check (await queue.push(items1)).isOk
|
||||
let populated = !queue.populateItem(request1.id, 12'u16)
|
||||
check populated.requestId == request1.id
|
||||
check populated.slotIndex == 12'u16
|
||||
check populated.slotSize == request1.ask.slotSize
|
||||
check populated.duration == request1.ask.duration
|
||||
check populated.reward == request1.ask.reward
|
||||
check populated.collateral == request1.ask.collateral
|
||||
|
||||
test "does not find exisiting request metadata":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
let item = SlotQueueItem.example
|
||||
check queue.populateItem(item.requestId, 12'u16).isNone
|
||||
|
||||
test "can support uint16.high slots":
|
||||
var request = StorageRequest.example
|
||||
let maxUInt16 = uint16.high
|
||||
let uint64Slots = uint64(maxUInt16)
|
||||
request.ask.slots = uint64Slots
|
||||
let items = SlotQueueItem.init(request.id, request.ask, request.expiry)
|
||||
check items.len.uint16 == maxUInt16
|
||||
|
||||
test "cannot support greater than uint16.high slots":
|
||||
var request = StorageRequest.example
|
||||
let int32Slots = uint16.high.int32 + 1
|
||||
let uint64Slots = uint64(int32Slots)
|
||||
request.ask.slots = uint64Slots
|
||||
expect SlotsOutOfRangeError:
|
||||
discard SlotQueueItem.init(request.id, request.ask, request.expiry)
|
||||
|
||||
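The last two tests bound the per-request slot count at uint16.high because slot indices are stored as uint16. A sketch of that guard (a sketch only; SlotsOutOfRangeError is the error the real SlotQueueItem.init raises):

# Sketch: expanding a request into queue items fails when the slot count
# cannot be indexed by uint16.
proc expandSlots(slots: uint64): seq[uint16] =
  if slots > uint16.high.uint64:
    raise newException(ValueError, "slots out of range")  # SlotsOutOfRangeError in the real code
  for i in 0'u16 ..< slots.uint16:
    result.add i   # one queue item per slot index
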
test "cannot push duplicate items":
|
||||
newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis)
|
||||
let item0 = SlotQueueItem.example
|
||||
let item1 = SlotQueueItem.example
|
||||
let item2 = SlotQueueItem.example
|
||||
check isOk (await queue.push(item0))
|
||||
check isOk (await queue.push(item1))
|
||||
check (await queue.push(@[item2, item2, item2, item2])).error of SlotQueueItemExistsError
|
||||
|
||||
test "can add items past max maxSize":
|
||||
newSlotQueue(maxSize = 4, maxWorkers = 2, processSlotDelay = 10.millis)
|
||||
let item1 = SlotQueueItem.example
|
||||
let item2 = SlotQueueItem.example
|
||||
let item3 = SlotQueueItem.example
|
||||
let item4 = SlotQueueItem.example
|
||||
check (await queue.push(item1)).isOk
|
||||
check (await queue.push(item2)).isOk
|
||||
check (await queue.push(item3)).isOk
|
||||
check (await queue.push(item4)).isOk
|
||||
check eventually onProcessSlotCalledWith.len == 4
|
||||
|
||||
test "can delete items":
|
||||
newSlotQueue(maxSize = 6, maxWorkers = 2, processSlotDelay = 10.millis)
|
||||
let item0 = SlotQueueItem.example
|
||||
let item1 = SlotQueueItem.example
|
||||
let item2 = SlotQueueItem.example
|
||||
let item3 = SlotQueueItem.example
|
||||
check (await queue.push(item0)).isOk
|
||||
check (await queue.push(item1)).isOk
|
||||
check (await queue.push(item2)).isOk
|
||||
check (await queue.push(item3)).isOk
|
||||
queue.delete(item3)
|
||||
check not queue.contains(item3)
|
||||
|
||||
test "can delete item by request id and slot id":
|
||||
newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis)
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateral += 1.u256
|
||||
let items0 = SlotQueueItem.init(request0)
|
||||
let items1 = SlotQueueItem.init(request1)
|
||||
check (await queue.push(items0)).isOk
|
||||
check (await queue.push(items1)).isOk
|
||||
let last = items1[items1.high]
|
||||
check eventually queue.contains(last)
|
||||
queue.delete(last.requestId, last.slotIndex)
|
||||
check not onProcessSlotCalledWith.anyIt(
|
||||
it == (last.requestId, last.slotIndex)
|
||||
)
|
||||
|
||||
test "can delete all items by request id":
|
||||
newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis)
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateral += 1.u256
|
||||
let items0 = SlotQueueItem.init(request0)
|
||||
let items1 = SlotQueueItem.init(request1)
|
||||
check (await queue.push(items0)).isOk
|
||||
check (await queue.push(items1)).isOk
|
||||
queue.delete(request1.id)
|
||||
check not onProcessSlotCalledWith.anyIt(it[0] == request1.id)
|
||||
|
||||
test "can check if contains item":
|
||||
newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 10.millis)
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
var request2 = StorageRequest.example
|
||||
var request3 = StorageRequest.example
|
||||
var request4 = StorageRequest.example
|
||||
var request5 = StorageRequest.example
|
||||
request1.ask.collateral = request0.ask.collateral + 1
|
||||
request2.ask.collateral = request1.ask.collateral + 1
|
||||
request3.ask.collateral = request2.ask.collateral + 1
|
||||
request4.ask.collateral = request3.ask.collateral + 1
|
||||
request5.ask.collateral = request4.ask.collateral + 1
|
||||
let item0 = SlotQueueItem.init(request0, 0)
|
||||
let item1 = SlotQueueItem.init(request1, 0)
|
||||
let item2 = SlotQueueItem.init(request2, 0)
|
||||
let item3 = SlotQueueItem.init(request3, 0)
|
||||
let item4 = SlotQueueItem.init(request4, 0)
|
||||
let item5 = SlotQueueItem.init(request5, 0)
|
||||
check queue.contains(item5) == false
|
||||
check (await queue.push(@[item0, item1, item2, item3, item4, item5])).isOk
|
||||
check queue.contains(item5)
|
||||
|
||||
test "sorts items by profitability ascending (higher pricePerSlot = higher priority)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
request.ask.reward += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
check item1 < item0
|
||||
|
||||
test "sorts items by collateral ascending (less required collateral = higher priority)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
request.ask.collateral -= 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
check item1 < item0
|
||||
|
||||
test "sorts items by expiry descending (longer expiry = higher priority)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
request.expiry += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
check item1 < item0
|
||||
|
||||
test "sorts items by slot size ascending (smaller dataset = higher priority)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
request.ask.slotSize -= 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
check item1 < item0
|
||||
|
||||
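Taken together, the four sort tests describe a lexicographic priority order, where "<" means dequeued first. A comparator consistent with them could look like the sketch below (field names and types are assumptions, not the repo's actual comparator):

# Sketch of a "<" matching the four tests above: more profitable first,
# then less collateral, then later expiry, then smaller slots.
type Item = object
  pricePerSlot, collateral, expiry, slotSize: uint64

proc `<`(a, b: Item): bool =
  if a.pricePerSlot != b.pricePerSlot:
    return a.pricePerSlot > b.pricePerSlot   # higher pricePerSlot = higher priority
  if a.collateral != b.collateral:
    return a.collateral < b.collateral       # less required collateral first
  if a.expiry != b.expiry:
    return a.expiry > b.expiry               # longer expiry first
  a.slotSize < b.slotSize                    # smaller dataset first
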
test "should call callback once an item is added":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
let item = SlotQueueItem.example
|
||||
check not onProcessSlotCalled
|
||||
check (await queue.push(item)).isOk
|
||||
check eventually onProcessSlotCalled
|
||||
|
||||
test "should only process item once":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
let item = SlotQueueItem.example
|
||||
check (await queue.push(item)).isOk
|
||||
check eventually onProcessSlotCalledWith == @[
|
||||
(item.requestId, item.slotIndex)
|
||||
]
|
||||
|
||||
test "should process items in correct order":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
# sleeping after push allows the slotqueue loop to iterate,
|
||||
# calling the callback for each pushed/updated item
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
request.ask.reward += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
request.ask.reward += 1.u256
|
||||
let item2 = SlotQueueItem.init(request, 2)
|
||||
request.ask.reward += 1.u256
|
||||
let item3 = SlotQueueItem.init(request, 3)
|
||||
|
||||
check (await queue.push(item0)).isOk
|
||||
await sleepAsync(1.millis)
|
||||
check (await queue.push(item1)).isOk
|
||||
await sleepAsync(1.millis)
|
||||
check (await queue.push(item2)).isOk
|
||||
await sleepAsync(1.millis)
|
||||
check (await queue.push(item3)).isOk
|
||||
|
||||
check eventually (
|
||||
onProcessSlotCalledWith == @[
|
||||
(item0.requestId, item0.slotIndex),
|
||||
(item1.requestId, item1.slotIndex),
|
||||
(item2.requestId, item2.slotIndex),
|
||||
(item3.requestId, item3.slotIndex),
|
||||
]
|
||||
)
|
||||
|
||||
test "fails to push when there's no matching availability":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
discard await reservations.release(availability.id,
|
||||
availability.size.truncate(uint))
|
||||
|
||||
let item = SlotQueueItem.example
|
||||
check (await queue.push(item)).error of NoMatchingAvailabilityError
|
||||
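The last test suggests push first checks the reservations for an availability that could host the slot. A hypothetical sketch of that guard (field and proc names here are invented, not the Reservations API):

# Toy availability check: a push only succeeds while some reserved
# availability could host the item.
type
  Avail = object
    size, duration: uint64
  QItem = object
    slotSize, duration: uint64

proc matches(a: Avail, item: QItem): bool =
  a.size >= item.slotSize and a.duration >= item.duration

proc canPush(avails: seq[Avail], item: QItem): bool =
  for a in avails:
    if a.matches(item):
      return true
  false   # surfaced to the caller as NoMatchingAvailabilityError
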
@@ -2,7 +2,6 @@ import std/sequtils

import pkg/asynctest
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/errors
import pkg/contractabi as ca

@@ -4,8 +4,9 @@ import std/options

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/libp2p/multicodec
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results
import pkg/codex/stores/cachestore
import pkg/codex/chunker

@@ -94,7 +95,7 @@ proc commonBlockStoreTests*(name: string,

    var count = 0
    for c in cids:
      if cid =? (await c):
      if cid =? await c:
        check (await store.hasBlock(cid)).tryGet()
        count.inc

@@ -2,7 +2,6 @@ import std/strutils

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils
import pkg/questionable/results
import pkg/codex/stores/cachestore

@@ -10,7 +10,6 @@
import std/random
import std/sequtils
import pkg/chronos
import pkg/libp2p
import pkg/asynctest
import pkg/questionable
import pkg/questionable/results

@@ -8,7 +8,6 @@
## those terms.

import pkg/chronos
import pkg/libp2p
import pkg/asynctest
import pkg/questionable/results
import pkg/codex/blocktype as bt

@@ -7,7 +7,6 @@ import pkg/questionable/results

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils
import pkg/stew/endians2
import pkg/datastore

@@ -1,9 +1,9 @@

import pkg/asynctest
import pkg/stew/byteutils
import pkg/codex/chunker
import pkg/chronicles
import pkg/chronos
import pkg/libp2p

import ./helpers

@@ -2,7 +2,6 @@ import std/sequtils

import pkg/asynctest
import pkg/chronos
import pkg/libp2p
import pkg/questionable/results

import pkg/codex/erasure

@@ -3,7 +3,6 @@ import std/sequtils
import pkg/chronos
import pkg/questionable/results
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils

import pkg/codex/chunker

@@ -62,6 +61,8 @@ checksuite "Manifest":
      Block.new(("Block " & $it).toBytes).tryGet().cid
    )
    manifest = Manifest.new(blocks).tryGet()

  var
    protected = Manifest.new(manifest, 2, 2).tryGet()

  check:

@@ -72,7 +73,7 @@ checksuite "Manifest":

    # fill up with empty Cid's
    for i in protected.rounded..<protected.len:
      protected.blocks[i] = EmptyCid[manifest.version]
      protected[i] = EmptyCid[manifest.version]
        .catch
        .get()[manifest.hcodec]
        .catch

3
tests/codex/testmerkletree.nim
Normal file
@@ -0,0 +1,3 @@
import ./merkletree/testmerkletree

{.warning[UnusedImport]: off.}

@@ -4,11 +4,11 @@ import std/math

import pkg/asynctest
import pkg/chronos
import pkg/chronicles
import pkg/stew/byteutils

import pkg/nitro
import pkg/libp2p
import pkg/libp2pdht/discv5/protocol as discv5
import pkg/codexdht/discv5/protocol as discv5

import pkg/codex/stores
import pkg/codex/blockexchange

@@ -1,5 +1,6 @@
import ./sales/testsales
import ./sales/teststates
import ./sales/testreservations
import ./sales/testslotqueue

{.warning[UnusedImport]: off.}

@@ -1,6 +1,5 @@
import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/questionable/results

import ./helpers

@@ -2,5 +2,7 @@ import ./utils/testoptionalcast
import ./utils/testkeyutils
import ./utils/testasyncstatemachine
import ./utils/testtimer
import ./utils/testthen
import ./utils/testtrackedfutures

{.warning[UnusedImport]: off.}

@@ -99,7 +99,7 @@ asyncchecksuite "async state machines":
  test "stops scheduling and current state":
    machine.start(State2.new())
    await sleepAsync(1.millis)
    machine.stop()
    await machine.stop()
    machine.schedule(moveToNextStateEvent)
    await sleepAsync(1.millis)
    check runs == [0, 1, 0, 0]

@@ -130,5 +130,5 @@ asyncchecksuite "async state machines":

    machine.start(State2.new())
    check eventually machine.query(description).isSome
    machine.stop()
    await machine.stop()
    check machine.query(description).isNone

@@ -1,7 +1,6 @@
import std/unittest
import std/os
import pkg/libp2p
import pkg/questionable/results
import pkg/questionable
import codex/utils/keyutils
import ../helpers

@@ -18,17 +17,17 @@ checksuite "keyutils":
    os.removeDir(path)

  test "creates a key file when it does not exist yet":
    check setupKey(path / "keyfile").isSuccess
    check setupKey(path / "keyfile").isOk
    check fileExists(path / "keyfile")

  test "stores key in a file that's only readable by the user":
    discard !setupKey(path / "keyfile")
    discard setupKey(path / "keyfile").get()
    when defined(posix):
      check getFilePermissions(path / "keyfile") == {fpUserRead, fpUserWrite}
    when defined(windows):
      check checkCurrentUserOnlyACL(path / "keyfile").get()

  test "reads key file when it does exist":
    let key = !setupKey(path / "keyfile")
    check !setupKey(path / "keyfile") == key
    let key = setupKey(path / "keyfile").get()
    check setupKey(path / "keyfile").get() == key

413
tests/codex/utils/testthen.nim
Normal file
@@ -0,0 +1,413 @@
import pkg/asynctest
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import codex/utils/then
import ../helpers

proc newError(): ref CatchableError =
  (ref CatchableError)(msg: "some error")

asyncchecksuite "then - Future[void]":
  var error = newError()
  var future: Future[void]

  setup:
    future = newFuture[void]("test void")

  teardown:
    if not future.finished:
      raiseAssert "test should finish future"

  test "then callback is fired when future is already finished":
    var firedImmediately = false
    future.complete()
    discard future.then(proc() = firedImmediately = true)
    check eventually firedImmediately

  test "then callback is fired after future is finished":
    var fired = false
    discard future.then(proc() = fired = true)
    future.complete()
    check eventually fired

  test "catch callback is fired when future is already failed":
    var actual: ref CatchableError
    future.fail(error)
    future.catch(proc(err: ref CatchableError) = actual = err)
    check eventually actual == error

  test "catch callback is fired after future is failed":
    var actual: ref CatchableError
    future.catch(proc(err: ref CatchableError) = actual = err)
    future.fail(error)
    check eventually actual == error

  test "cancelled callback is fired when future is already cancelled":
    var fired = false
    await future.cancelAndWait()
    discard future.cancelled(proc() = fired = true)
    check eventually fired

  test "cancelled callback is fired after future is cancelled":
    var fired = false
    discard future.cancelled(proc() = fired = true)
    await future.cancelAndWait()
    check eventually fired

  test "does not fire other callbacks when successful":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.complete()

    check eventually onSuccessCalled
    check always (not onCancelledCalled and not onCatchCalled)

  test "does not fire other callbacks when fails":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    future.fail(error)

    check eventually onCatchCalled
    check always (not onCancelledCalled and not onSuccessCalled)

  test "does not fire other callbacks when cancelled":
    var onSuccessCalled = false
    var onCancelledCalled = false
    var onCatchCalled = false

    future
      .then(proc() = onSuccessCalled = true)
      .cancelled(proc() = onCancelledCalled = true)
      .catch(proc(e: ref CatchableError) = onCatchCalled = true)

    await future.cancelAndWait()

    check eventually onCancelledCalled
    check always (not onSuccessCalled and not onCatchCalled)

  test "can chain onSuccess when future completes":
    var onSuccessCalledTimes = 0
    discard future
      .then(proc() = inc onSuccessCalledTimes)
      .then(proc() = inc onSuccessCalledTimes)
      .then(proc() = inc onSuccessCalledTimes)
    future.complete()
    check eventually onSuccessCalledTimes == 3

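These suites specify a small promise-style layer over chronos futures. A minimal sketch of the Future[void] case (an assumption about shape; codex/utils/then is the real source) registers a completion callback and returns the future so calls chain:

import pkg/chronos

# Sketch: run `onSuccess` once the future completes successfully; return the
# future so .then(...).then(...) chains as in the tests above.
proc thenSketch(fut: Future[void], onSuccess: proc() {.gcsafe.}): Future[void] =
  proc cb(udata: pointer) =
    if fut.completed():   # not failed, not cancelled
      onSuccess()
  fut.addCallback(cb)     # chronos fires this when the future settles
  fut
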
asyncchecksuite "then - Future[T]":
|
||||
var error = newError()
|
||||
var future: Future[int]
|
||||
|
||||
setup:
|
||||
future = newFuture[int]("test void")
|
||||
|
||||
teardown:
|
||||
if not future.finished:
|
||||
raiseAssert "test should finish future"
|
||||
|
||||
test "then callback is fired when future is already finished":
|
||||
var cbVal = 0
|
||||
future.complete(1)
|
||||
discard future.then(proc(val: int) = cbVal = val)
|
||||
check eventually cbVal == 1
|
||||
|
||||
test "then callback is fired after future is finished":
|
||||
var cbVal = 0
|
||||
discard future.then(proc(val: int) = cbVal = val)
|
||||
future.complete(1)
|
||||
check eventually cbVal == 1
|
||||
|
||||
test "catch callback is fired when future is already failed":
|
||||
var actual: ref CatchableError
|
||||
future.fail(error)
|
||||
future.catch(proc(err: ref CatchableError) = actual = err)
|
||||
check eventually actual == error
|
||||
|
||||
test "catch callback is fired after future is failed":
|
||||
var actual: ref CatchableError
|
||||
future.catch(proc(err: ref CatchableError) = actual = err)
|
||||
future.fail(error)
|
||||
check eventually actual == error
|
||||
|
||||
test "cancelled callback is fired when future is already cancelled":
|
||||
var fired = false
|
||||
await future.cancelAndWait()
|
||||
discard future.cancelled(proc() = fired = true)
|
||||
check eventually fired
|
||||
|
||||
test "cancelled callback is fired after future is cancelled":
|
||||
var fired = false
|
||||
discard future.cancelled(proc() = fired = true)
|
||||
await future.cancelAndWait()
|
||||
check eventually fired
|
||||
|
||||
test "does not fire other callbacks when successful":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc(val: int) = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
future.complete(1)
|
||||
|
||||
check eventually onSuccessCalled
|
||||
check always (not onCancelledCalled and not onCatchCalled)
|
||||
|
||||
test "does not fire other callbacks when fails":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc(val: int) = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
future.fail(error)
|
||||
|
||||
check eventually onCatchCalled
|
||||
check always (not onCancelledCalled and not onSuccessCalled)
|
||||
|
||||
test "does not fire other callbacks when cancelled":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc(val: int) = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
await future.cancelAndWait()
|
||||
|
||||
check eventually onCancelledCalled
|
||||
check always (not onSuccessCalled and not onCatchCalled)
|
||||
|
||||
test "can chain onSuccess when future completes":
|
||||
var onSuccessCalledTimes = 0
|
||||
discard future
|
||||
.then(proc(val: int) = inc onSuccessCalledTimes)
|
||||
.then(proc(val: int) = inc onSuccessCalledTimes)
|
||||
.then(proc(val: int) = inc onSuccessCalledTimes)
|
||||
future.complete(1)
|
||||
check eventually onSuccessCalledTimes == 3
|
||||
|
||||
asyncchecksuite "then - Future[?!void]":
|
||||
var error = newError()
|
||||
var future: Future[?!void]
|
||||
|
||||
setup:
|
||||
future = newFuture[?!void]("test void")
|
||||
|
||||
teardown:
|
||||
if not future.finished:
|
||||
raiseAssert "test should finish future"
|
||||
|
||||
test "then callback is fired when future is already finished":
|
||||
var firedImmediately = false
|
||||
future.complete(success())
|
||||
discard future.then(proc() = firedImmediately = true)
|
||||
check eventually firedImmediately
|
||||
|
||||
test "then callback is fired after future is finished":
|
||||
var fired = false
|
||||
discard future.then(proc() = fired = true)
|
||||
future.complete(success())
|
||||
check eventually fired
|
||||
|
||||
test "catch callback is fired when future is already failed":
|
||||
var actual: ref CatchableError
|
||||
future.fail(error)
|
||||
future.catch(proc(err: ref CatchableError) = actual = err)
|
||||
check eventually actual == error
|
||||
|
||||
test "catch callback is fired after future is failed":
|
||||
var actual: ref CatchableError
|
||||
future.catch(proc(err: ref CatchableError) = actual = err)
|
||||
future.fail(error)
|
||||
check eventually actual == error
|
||||
|
||||
test "cancelled callback is fired when future is already cancelled":
|
||||
var fired = false
|
||||
await future.cancelAndWait()
|
||||
discard future.cancelled(proc() = fired = true)
|
||||
check eventually fired
|
||||
|
||||
test "cancelled callback is fired after future is cancelled":
|
||||
var fired = false
|
||||
discard future.cancelled(proc() = fired = true)
|
||||
await future.cancelAndWait()
|
||||
check eventually fired
|
||||
|
||||
test "does not fire other callbacks when successful":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc() = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
future.complete(success())
|
||||
|
||||
check eventually onSuccessCalled
|
||||
check always (not onCancelledCalled and not onCatchCalled)
|
||||
|
||||
test "does not fire other callbacks when fails":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc() = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
future.fail(error)
|
||||
|
||||
check eventually onCatchCalled
|
||||
check always (not onCancelledCalled and not onSuccessCalled)
|
||||
|
||||
test "does not fire other callbacks when cancelled":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc() = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
await future.cancelAndWait()
|
||||
|
||||
check eventually onCancelledCalled
|
||||
check always (not onSuccessCalled and not onCatchCalled)
|
||||
|
||||
test "can chain onSuccess when future completes":
|
||||
var onSuccessCalledTimes = 0
|
||||
discard future
|
||||
.then(proc() = inc onSuccessCalledTimes)
|
||||
.then(proc() = inc onSuccessCalledTimes)
|
||||
.then(proc() = inc onSuccessCalledTimes)
|
||||
future.complete(success())
|
||||
check eventually onSuccessCalledTimes == 3
|
||||
|
||||
asyncchecksuite "then - Future[?!T]":
|
||||
var error = newError()
|
||||
var future: Future[?!int]
|
||||
|
||||
setup:
|
||||
future = newFuture[?!int]("test void")
|
||||
|
||||
teardown:
|
||||
if not future.finished:
|
||||
raiseAssert "test should finish future"
|
||||
|
||||
test "then callback is fired when future is already finished":
|
||||
var cbVal = 0
|
||||
future.complete(success(1))
|
||||
discard future.then(proc(val: int) = cbVal = val)
|
||||
check eventually cbVal == 1
|
||||
|
||||
test "then callback is fired after future is finished":
|
||||
var cbVal = 0
|
||||
discard future.then(proc(val: int) = cbVal = val)
|
||||
future.complete(success(1))
|
||||
check eventually cbVal == 1
|
||||
|
||||
test "catch callback is fired when future is already failed":
|
||||
var actual: ref CatchableError
|
||||
future.fail(error)
|
||||
future.catch(proc(err: ref CatchableError) = actual = err)
|
||||
check eventually actual == error
|
||||
|
||||
test "catch callback is fired after future is failed":
|
||||
var actual: ref CatchableError
|
||||
future.catch(proc(err: ref CatchableError) = actual = err)
|
||||
future.fail(error)
|
||||
check eventually actual == error
|
||||
|
||||
test "cancelled callback is fired when future is already cancelled":
|
||||
var fired = false
|
||||
await future.cancelAndWait()
|
||||
discard future.cancelled(proc() = fired = true)
|
||||
check eventually fired
|
||||
|
||||
test "cancelled callback is fired after future is cancelled":
|
||||
var fired = false
|
||||
discard future.cancelled(proc() = fired = true)
|
||||
await future.cancelAndWait()
|
||||
check eventually fired
|
||||
|
||||
test "does not fire other callbacks when successful":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc(val: int) = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
future.complete(success(1))
|
||||
|
||||
check eventually onSuccessCalled
|
||||
check always (not onCancelledCalled and not onCatchCalled)
|
||||
|
||||
test "does not fire other callbacks when fails":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc(val: int) = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
future.fail(error)
|
||||
|
||||
check eventually onCatchCalled
|
||||
check always (not onCancelledCalled and not onSuccessCalled)
|
||||
|
||||
test "does not fire other callbacks when cancelled":
|
||||
var onSuccessCalled = false
|
||||
var onCancelledCalled = false
|
||||
var onCatchCalled = false
|
||||
|
||||
future
|
||||
.then(proc(val: int) = onSuccessCalled = true)
|
||||
.cancelled(proc() = onCancelledCalled = true)
|
||||
.catch(proc(e: ref CatchableError) = onCatchCalled = true)
|
||||
|
||||
await future.cancelAndWait()
|
||||
|
||||
check eventually onCancelledCalled
|
||||
check always (not onSuccessCalled and not onCatchCalled)
|
||||
|
||||
test "can chain onSuccess when future completes":
|
||||
var onSuccessCalledTimes = 0
|
||||
discard future
|
||||
.then(proc(val: int) = inc onSuccessCalledTimes)
|
||||
.then(proc(val: int) = inc onSuccessCalledTimes)
|
||||
.then(proc(val: int) = inc onSuccessCalledTimes)
|
||||
future.complete(success(1))
|
||||
check eventually onSuccessCalledTimes == 3
|
||||
67
tests/codex/utils/testtrackedfutures.nim
Normal file
@@ -0,0 +1,67 @@
import pkg/asynctest
import pkg/chronos
import codex/utils/trackedfutures
import ../helpers/eventually
import ../helpers

type Module = object
  trackedFutures: TrackedFutures

asyncchecksuite "tracked futures":
  var module: Module

  setup:
    module = Module(trackedFutures: TrackedFutures.new())

  test "starts with zero tracked futures":
    check module.trackedFutures.len == 0

  test "tracks unfinished futures":
    let fut = newFuture[void]("test")
    discard fut.track(module)
    check module.trackedFutures.len == 1

  test "does not track completed futures":
    let fut = newFuture[void]("test")
    fut.complete()
    discard fut.track(module)
    check eventually module.trackedFutures.len == 0

  test "does not track failed futures":
    let fut = newFuture[void]("test")
    fut.fail((ref CatchableError)(msg: "some error"))
    discard fut.track(module)
    check eventually module.trackedFutures.len == 0

  test "does not track cancelled futures":
    let fut = newFuture[void]("test")
    await fut.cancelAndWait()
    discard fut.track(module)
    check eventually module.trackedFutures.len == 0

  test "removes tracked future when finished":
    let fut = newFuture[void]("test")
    discard fut.track(module)
    fut.complete()
    check eventually module.trackedFutures.len == 0

  test "removes tracked future when cancelled":
    let fut = newFuture[void]("test")
    discard fut.track(module)
    await fut.cancelAndWait()
    check eventually module.trackedFutures.len == 0

  test "cancels and removes all tracked futures":
    let fut1 = newFuture[void]("test1")
    let fut2 = newFuture[void]("test2")
    let fut3 = newFuture[void]("test3")
    discard fut1.track(module)
    discard fut2.track(module)
    discard fut3.track(module)
    await module.trackedFutures.cancelTracked()
    check eventually fut1.cancelled
    check eventually fut2.cancelled
    check eventually fut3.cancelled
    check eventually module.trackedFutures.len == 0

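The suite above pins down the contract: only unfinished futures are kept, entries drop out when a future completes, fails, or is cancelled, and cancelTracked cancels whatever is left. A toy version with that behaviour (an assumption, not the codex/utils/trackedfutures source):

import std/sequtils
import std/tables
import pkg/chronos

# Toy TrackedFutures: register unfinished futures, auto-remove on settle,
# cancel the remainder on demand.
type Tracked = ref object
  futures: Table[uint, FutureBase]

proc len(t: Tracked): int = t.futures.len

proc track(t: Tracked, fut: FutureBase) =
  if fut.finished:
    return                        # completed/failed/cancelled: never tracked
  t.futures[fut.id] = fut
  proc remove(udata: pointer) = t.futures.del(fut.id)
  fut.addCallback(remove)         # drop the entry once the future settles

proc cancelTracked(t: Tracked) {.async.} =
  let pending = toSeq(t.futures.values)  # snapshot: callbacks mutate the table
  for fut in pending:
    await fut.cancelAndWait()
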
Some files were not shown because too many files have changed in this diff